-rw-r--r--  configure.py | 7
-rw-r--r--  tensorflow/compiler/tf2xla/functionalize_control_flow.cc | 4
-rw-r--r--  tensorflow/compiler/tf2xla/kernels/gather_op.cc | 12
-rw-r--r--  tensorflow/compiler/xla/client/computation_builder.cc | 9
-rw-r--r--  tensorflow/compiler/xla/client/computation_builder.h | 23
-rw-r--r--  tensorflow/compiler/xla/service/buffer_assignment.cc | 5
-rw-r--r--  tensorflow/compiler/xla/service/cpu/llvm_ir_runtime.cc | 2
-rw-r--r--  tensorflow/compiler/xla/service/executable.h | 10
-rw-r--r--  tensorflow/compiler/xla/service/hlo_instruction.cc | 13
-rw-r--r--  tensorflow/compiler/xla/service/hlo_runner.cc | 31
-rw-r--r--  tensorflow/compiler/xla/service/hlo_runner.h | 16
-rw-r--r--  tensorflow/compiler/xla/service/llvm_ir/llvm_util.cc | 5
-rw-r--r--  tensorflow/compiler/xla/service/service.cc | 116
-rw-r--r--  tensorflow/compiler/xla/service/service.h | 3
-rw-r--r--  tensorflow/compiler/xla/service/user_computation.cc | 267
-rw-r--r--  tensorflow/compiler/xla/service/user_computation.h | 8
-rw-r--r--  tensorflow/compiler/xla/tests/compute_constant_test.cc | 45
-rw-r--r--  tensorflow/compiler/xla/tests/while_test.cc | 105
-rw-r--r--  tensorflow/compiler/xla/tools/parser/hlo_parser.cc | 71
-rw-r--r--  tensorflow/compiler/xla/tools/parser/hlo_parser_test.cc | 2
-rw-r--r--  tensorflow/compiler/xla/xla.proto | 2
-rw-r--r--  tensorflow/contrib/batching/adaptive_shared_batch_scheduler.h | 7
-rw-r--r--  tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py | 2
-rwxr-xr-x  tensorflow/contrib/cmake/tf_python.cmake | 1
-rw-r--r--  tensorflow/contrib/data/__init__.py | 2
-rw-r--r--  tensorflow/contrib/eager/python/examples/mnist/mnist.py | 4
-rw-r--r--  tensorflow/contrib/eager/python/examples/rnn_colorbot/rnn_colorbot.py | 4
-rw-r--r--  tensorflow/contrib/estimator/BUILD | 64
-rw-r--r--  tensorflow/contrib/estimator/python/estimator/replicate_model_fn.py | 470
-rw-r--r--  tensorflow/contrib/estimator/python/estimator/replicate_model_fn_test.py | 901
-rw-r--r--  tensorflow/contrib/framework/BUILD | 27
-rw-r--r--  tensorflow/contrib/framework/__init__.py | 2
-rw-r--r--  tensorflow/contrib/framework/python/framework/__init__.py | 1
-rw-r--r--  tensorflow/contrib/framework/python/framework/graph_util.py | 128
-rw-r--r--  tensorflow/contrib/framework/python/framework/graph_util_test.py | 61
-rw-r--r--  tensorflow/contrib/framework/python/ops/__init__.py | 1
-rw-r--r--  tensorflow/contrib/framework/python/ops/sort_ops.py | 113
-rw-r--r--  tensorflow/contrib/framework/python/ops/sort_ops_test.py | 95
-rw-r--r--  tensorflow/contrib/summary/summary_ops.py | 4
-rw-r--r--  tensorflow/contrib/tpu/profiler/BUILD | 7
-rw-r--r--  tensorflow/contrib/tpu/profiler/tf_op_stats.proto | 127
-rw-r--r--  tensorflow/contrib/tpu/python/tpu/tpu_estimator.py | 2
-rw-r--r--  tensorflow/core/BUILD | 14
-rw-r--r--  tensorflow/core/api_def/api_test.cc | 288
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_A.pbtxt | 670
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Abort.pbtxt | 16
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Abs.pbtxt | 9
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_AccumulateNV2.pbtxt | 26
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_AccumulatorApplyGradient.pbtxt | 32
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_AccumulatorNumAccumulated.pbtxt | 16
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_AccumulatorSetGlobalStep.pbtxt | 20
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_AccumulatorTakeGradient.pbtxt | 36
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Acos.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Acosh.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Add.pbtxt | 8
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_AddManySparseToTensorsMap.pbtxt | 68
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_AddN.pbtxt | 10
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_AddSparseToTensorsMap.pbtxt | 58
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_AddV2.pbtxt | 8
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_AdjustContrast.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_AdjustContrastv2.pbtxt | 36
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_AdjustHue.pbtxt | 30
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_AdjustSaturation.pbtxt | 30
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_All.pbtxt | 42
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_AllCandidateSampler.pbtxt | 80
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Angle.pbtxt | 23
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Any.pbtxt | 42
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ApplyAdadelta.pbtxt | 65
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ApplyAdagrad.pbtxt | 46
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ApplyAdagradDA.pbtxt | 65
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ApplyAdam.pbtxt | 90
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ApplyCenteredRMSProp.pbtxt | 86
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ApplyFtrl.pbtxt | 73
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ApplyFtrlV2.pbtxt | 75
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ApplyGradientDescent.pbtxt | 35
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ApplyMomentum.pbtxt | 62
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ApplyProximalAdagrad.pbtxt | 58
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ApplyProximalGradientDescent.pbtxt | 51
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ApplyRMSProp.pbtxt | 72
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ApproximateEqual.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ArgMax.pbtxt | 15
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ArgMin.pbtxt | 15
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_AsString.pbtxt | 42
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Asin.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Asinh.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Assert.pbtxt | 26
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Assign.pbtxt | 42
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_AssignAdd.pbtxt | 34
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_AssignAddVariableOp.pbtxt | 29
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_AssignSub.pbtxt | 34
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_AssignSubVariableOp.pbtxt | 29
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_AssignVariableOp.pbtxt | 26
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Atan.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Atan2.pbtxt | 11
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Atanh.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_AudioSpectrogram.pbtxt | 63
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_AudioSummary.pbtxt | 47
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_AudioSummaryV2.pbtxt | 50
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_AvgPool.pbtxt | 48
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_AvgPool3D.pbtxt | 46
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_AvgPool3DGrad.pbtxt | 52
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_AvgPoolGrad.pbtxt | 52
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_B.pbtxt | 448
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Barrier.pbtxt | 55
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_BarrierClose.pbtxt | 26
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_BarrierIncompleteSize.pbtxt | 17
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_BarrierInsertMany.pbtxt | 35
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_BarrierReadySize.pbtxt | 17
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_BarrierTakeMany.pbtxt | 68
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_BatchCholesky.pbtxt | 3
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_BatchCholeskyGrad.pbtxt | 3
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_BatchDataset.pbtxt | 11
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_BatchFFT.pbtxt | 3
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_BatchFFT2D.pbtxt | 3
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_BatchFFT3D.pbtxt | 3
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_BatchIFFT.pbtxt | 3
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_BatchIFFT2D.pbtxt | 3
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_BatchIFFT3D.pbtxt | 3
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_BatchMatMul.pbtxt | 54
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_BatchMatrixBandPart.pbtxt | 3
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_BatchMatrixDeterminant.pbtxt | 3
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_BatchMatrixDiag.pbtxt | 3
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_BatchMatrixDiagPart.pbtxt | 3
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_BatchMatrixInverse.pbtxt | 3
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_BatchMatrixSetDiag.pbtxt | 3
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_BatchMatrixSolve.pbtxt | 3
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_BatchMatrixSolveLs.pbtxt | 3
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_BatchMatrixTriangularSolve.pbtxt | 3
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_BatchNormWithGlobalNormalization.pbtxt | 57
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_BatchNormWithGlobalNormalizationGrad.pbtxt | 86
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_BatchSelfAdjointEig.pbtxt | 3
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_BatchSelfAdjointEigV2.pbtxt | 3
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_BatchSvd.pbtxt | 3
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_BatchToSpace.pbtxt | 104
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_BatchToSpaceND.pbtxt | 139
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Betainc.pbtxt | 19
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_BiasAdd.pbtxt | 38
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_BiasAddGrad.pbtxt | 33
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_BiasAddV1.pbtxt | 29
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Bincount.pbtxt | 40
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Bitcast.pbtxt | 18
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_BitwiseAnd.pbtxt | 8
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_BitwiseOr.pbtxt | 8
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_BitwiseXor.pbtxt | 8
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_BroadcastArgs.pbtxt | 11
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_BroadcastGradientArgs.pbtxt | 8
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Bucketize.pbtxt | 38
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_C.pbtxt | 513
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_CTCBeamSearchDecoder.pbtxt | 72
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_CTCGreedyDecoder.pbtxt | 61
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_CTCLoss.pbtxt | 70
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_CacheDataset.pbtxt | 17
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Cast.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Ceil.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_CheckNumerics.pbtxt | 14
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Cholesky.pbtxt | 31
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_CholeskyGrad.pbtxt | 30
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_CompareAndBitpack.pbtxt | 53
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Complex.pbtxt | 20
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ComplexAbs.pbtxt | 10
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ComputeAccidentalHits.pbtxt | 62
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Concat.pbtxt | 27
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ConcatOffset.pbtxt | 36
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ConcatV2.pbtxt | 29
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ConcatenateDataset.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ConditionalAccumulator.pbtxt | 44
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Conj.pbtxt | 19
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ConjugateTranspose.pbtxt | 9
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Const.pbtxt | 10
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ControlTrigger.pbtxt | 7
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Conv2D.pbtxt | 72
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Conv2DBackpropFilter.pbtxt | 57
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Conv2DBackpropInput.pbtxt | 56
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Conv3D.pbtxt | 47
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Conv3DBackpropFilter.pbtxt | 37
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Conv3DBackpropFilterV2.pbtxt | 49
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Conv3DBackpropInput.pbtxt | 37
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Conv3DBackpropInputV2.pbtxt | 49
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Cos.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Cosh.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_CountUpTo.pbtxt | 24
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_CropAndResize.pbtxt | 74
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_CropAndResizeGradBoxes.pbtxt | 52
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_CropAndResizeGradImage.pbtxt | 53
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Cross.pbtxt | 27
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Cumprod.pbtxt | 61
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Cumsum.pbtxt | 61
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_D.pbtxt | 790
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_DatasetToSingleElement.pbtxt | 16
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_DebugGradientIdentity.pbtxt | 8
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_DecodeAndCropJpeg.pbtxt | 86
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_DecodeBase64.pbtxt | 20
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_DecodeBmp.pbtxt | 26
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_DecodeCSV.pbtxt | 49
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_DecodeGif.pbtxt | 25
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_DecodeJSONExample.pbtxt | 26
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_DecodeJpeg.pbtxt | 80
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_DecodePng.pbtxt | 39
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_DecodeRaw.pbtxt | 26
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_DecodeWav.pbtxt | 50
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_DeleteSessionTensor.pbtxt | 10
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_DenseToDenseSetOperation.pbtxt | 47
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_DenseToSparseBatchDataset.pbtxt | 25
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_DenseToSparseSetOperation.pbtxt | 70
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_DepthToSpace.pbtxt | 101
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_DepthwiseConv2dNative.pbtxt | 47
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_DepthwiseConv2dNativeBackpropFilter.pbtxt | 60
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_DepthwiseConv2dNativeBackpropInput.pbtxt | 60
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Dequantize.pbtxt | 91
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_DeserializeIterator.pbtxt | 17
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_DeserializeManySparse.pbtxt | 60
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_DestroyResourceOp.pbtxt | 21
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_DestroyTemporaryVariable.pbtxt | 26
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Diag.pbtxt | 29
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_DiagPart.pbtxt | 36
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Digamma.pbtxt | 7
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Dilation2D.pbtxt | 67
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Dilation2DBackpropFilter.pbtxt | 48
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Dilation2DBackpropInput.pbtxt | 48
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Div.pbtxt | 8
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_DrawBoundingBoxes.pbtxt | 37
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_DynamicPartition.pbtxt | 55
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_DynamicStitch.pbtxt | 68
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_E.pbtxt | 261
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_EditDistance.pbtxt | 96
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Elu.pbtxt | 8
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_EluGrad.pbtxt | 24
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_EncodeBase64.pbtxt | 30
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_EncodeJpeg.pbtxt | 89
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_EncodePng.pbtxt | 35
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_EncodeWav.pbtxt | 31
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Enter.pbtxt | 42
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Equal.pbtxt | 8
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Erf.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Erfc.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Exit.pbtxt | 20
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Exp.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ExpandDims.pbtxt | 52
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Expm1.pbtxt | 7
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ExtractGlimpse.pbtxt | 77
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ExtractImagePatches.pbtxt | 57
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ExtractJpegShape.pbtxt | 26
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_F.pbtxt | 411
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_FFT.pbtxt | 25
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_FFT2D.pbtxt | 25
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_FFT3D.pbtxt | 25
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_FIFOQueue.pbtxt | 47
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_FIFOQueueV2.pbtxt | 49
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Fact.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_FakeQuantWithMinMaxArgs.pbtxt | 13
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_FakeQuantWithMinMaxArgsGradient.pbtxt | 23
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_FakeQuantWithMinMaxVars.pbtxt | 16
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_FakeQuantWithMinMaxVarsGradient.pbtxt | 50
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_FakeQuantWithMinMaxVarsPerChannel.pbtxt | 17
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_FakeQuantWithMinMaxVarsPerChannelGradient.pbtxt | 53
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_FakeQueue.pbtxt | 5
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Fill.pbtxt | 31
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_FilterDataset.pbtxt | 24
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_FixedLengthRecordDataset.pbtxt | 37
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_FixedLengthRecordReader.pbtxt | 50
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_FixedLengthRecordReaderV2.pbtxt | 59
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_FixedUnigramCandidateSampler.pbtxt | 144
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_FlatMapDataset.pbtxt | 17
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Floor.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_FloorDiv.pbtxt | 8
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_FloorMod.pbtxt | 11
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_FractionalAvgPool.pbtxt | 90
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_FractionalAvgPoolGrad.pbtxt | 59
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_FractionalMaxPool.pbtxt | 114
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_FractionalMaxPoolGrad.pbtxt | 58
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_FusedBatchNorm.pbtxt | 99
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_FusedBatchNormGrad.pbtxt | 102
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_FusedBatchNormGradV2.pbtxt | 108
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_FusedBatchNormV2.pbtxt | 105
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_FusedPadConv2D.pbtxt | 50
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_FusedResizeAndPadConv2D.pbtxt | 64
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_G.pbtxt | 257
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Gather.pbtxt | 31
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_GatherNd.pbtxt | 123
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_GatherV2.pbtxt | 54
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_GenerateVocabRemapping.pbtxt | 68
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_GetSessionHandle.pbtxt | 17
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_GetSessionHandleV2.pbtxt | 17
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_GetSessionTensor.pbtxt | 22
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Greater.pbtxt | 8
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_GreaterEqual.pbtxt | 8
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_GroupByWindowDataset.pbtxt | 14
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_H.pbtxt | 52
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_HSVToRGB.pbtxt | 23
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_HashTable.pbtxt | 49
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_HashTableV2.pbtxt | 51
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_HistogramFixedWidth.pbtxt | 47
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_HistogramSummary.pbtxt | 29
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_I.pbtxt | 518
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_IFFT.pbtxt | 25
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_IFFT2D.pbtxt | 25
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_IFFT3D.pbtxt | 25
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_IRFFT.pbtxt | 43
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_IRFFT2D.pbtxt | 44
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_IRFFT3D.pbtxt | 44
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Identity.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_IdentityN.pbtxt | 21
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_IdentityReader.pbtxt | 29
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_IdentityReaderV2.pbtxt | 31
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Igamma.pbtxt | 19
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Igammac.pbtxt | 18
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_IgnoreErrorsDataset.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Imag.pbtxt | 17
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ImageSummary.pbtxt | 70
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ImmutableConst.pbtxt | 26
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_InTopK.pbtxt | 44
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_InTopKV2.pbtxt | 44
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_InitializeTable.pbtxt | 23
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_InitializeTableFromTextFile.pbtxt | 54
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_InitializeTableFromTextFileV2.pbtxt | 56
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_InitializeTableV2.pbtxt | 25
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_InterleaveDataset.pbtxt | 19
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Inv.pbtxt | 7
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_InvGrad.pbtxt | 9
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Invert.pbtxt | 8
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_InvertPermutation.pbtxt | 33
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_IsFinite.pbtxt | 9
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_IsInf.pbtxt | 9
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_IsNan.pbtxt | 9
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_IsVariableInitialized.pbtxt | 19
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Iterator.pbtxt | 11
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_IteratorFromStringHandle.pbtxt | 30
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_IteratorGetNext.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_IteratorToStringHandle.pbtxt | 16
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_L.pbtxt | 392
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_L2Loss.pbtxt | 21
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_LMDBReader.pbtxt | 24
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_LRN.pbtxt | 47
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_LRNGrad.pbtxt | 53
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_LearnedUnigramCandidateSampler.pbtxt | 86
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_LeftShift.pbtxt | 8
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Less.pbtxt | 8
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_LessEqual.pbtxt | 8
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Lgamma.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_LinSpace.pbtxt | 39
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ListDiff.pbtxt | 54
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_LoadAndRemapMatrix.pbtxt | 105
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Log.pbtxt | 7
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Log1p.pbtxt | 7
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_LogMatrixDeterminant.pbtxt | 34
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_LogSoftmax.pbtxt | 21
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_LogUniformCandidateSampler.pbtxt | 86
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_LogicalAnd.pbtxt | 8
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_LogicalNot.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_LogicalOr.pbtxt | 8
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_LookupTableExport.pbtxt | 23
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_LookupTableExportV2.pbtxt | 25
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_LookupTableFind.pbtxt | 31
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_LookupTableFindV2.pbtxt | 33
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_LookupTableImport.pbtxt | 27
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_LookupTableImportV2.pbtxt | 29
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_LookupTableInsert.pbtxt | 27
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_LookupTableInsertV2.pbtxt | 29
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_LookupTableSize.pbtxt | 17
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_LookupTableSizeV2.pbtxt | 19
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_LoopCond.pbtxt | 20
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_M.pbtxt | 749
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MakeIterator.pbtxt | 8
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MapAndBatchDataset.pbtxt | 26
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MapClear.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MapDataset.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MapIncompleteSize.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MapPeek.pbtxt | 8
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MapSize.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MapStage.pbtxt | 37
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MapUnstage.pbtxt | 8
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MapUnstageNoKey.pbtxt | 8
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MatMul.pbtxt | 25
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MatchingFiles.pbtxt | 20
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MatrixBandPart.pbtxt | 71
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MatrixDeterminant.pbtxt | 21
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MatrixDiag.pbtxt | 44
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MatrixDiagPart.pbtxt | 47
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MatrixInverse.pbtxt | 33
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MatrixSetDiag.pbtxt | 36
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MatrixSolve.pbtxt | 37
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MatrixSolveLs.pbtxt | 68
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MatrixTriangularSolve.pbtxt | 57
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Max.pbtxt | 42
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MaxPool.pbtxt | 45
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MaxPool3D.pbtxt | 46
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MaxPool3DGrad.pbtxt | 52
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MaxPool3DGradGrad.pbtxt | 58
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MaxPoolGrad.pbtxt | 58
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MaxPoolGradGrad.pbtxt | 57
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MaxPoolGradGradV2.pbtxt | 57
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MaxPoolGradGradWithArgmax.pbtxt | 48
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MaxPoolGradV2.pbtxt | 57
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MaxPoolGradWithArgmax.pbtxt | 49
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MaxPoolV2.pbtxt | 45
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MaxPoolWithArgmax.pbtxt | 51
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Maximum.pbtxt | 8
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Mean.pbtxt | 42
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Merge.pbtxt | 29
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MergeSummary.pbtxt | 26
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MergeV2Checkpoints.pbtxt | 33
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Mfcc.pbtxt | 51
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Min.pbtxt | 42
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Minimum.pbtxt | 8
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MirrorPad.pbtxt | 60
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MirrorPadGrad.pbtxt | 50
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Mod.pbtxt | 11
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Mul.pbtxt | 14
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Multinomial.pbtxt | 37
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MutableDenseHashTable.pbtxt | 72
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MutableDenseHashTableV2.pbtxt | 74
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MutableHashTable.pbtxt | 49
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MutableHashTableOfTensors.pbtxt | 42
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MutableHashTableOfTensorsV2.pbtxt | 44
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MutableHashTableV2.pbtxt | 51
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_N.pbtxt | 94
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Neg.pbtxt | 13
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_NegTrain.pbtxt | 40
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_NextIteration.pbtxt | 16
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_NoOp.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_NonMaxSuppression.pbtxt | 56
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_NonMaxSuppressionV2.pbtxt | 58
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_NotEqual.pbtxt | 8
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_NthElement.pbtxt | 39
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_O.pbtxt | 195
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_OneHot.pbtxt | 130
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_OneShotIterator.pbtxt | 37
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_OnesLike.pbtxt | 16
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_OrderedMapClear.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_OrderedMapIncompleteSize.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_OrderedMapPeek.pbtxt | 9
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_OrderedMapSize.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_OrderedMapStage.pbtxt | 40
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_OrderedMapUnstage.pbtxt | 8
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_OrderedMapUnstageNoKey.pbtxt | 8
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_P.pbtxt | 431
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Pack.pbtxt | 47
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Pad.pbtxt | 28
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_PadV2.pbtxt | 30
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_PaddedBatchDataset.pbtxt | 27
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_PaddingFIFOQueue.pbtxt | 56
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_PaddingFIFOQueueV2.pbtxt | 58
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ParallelConcat.pbtxt | 42
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ParallelDynamicStitch.pbtxt | 67
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ParallelInterleaveDataset.pbtxt | 21
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ParallelMapDataset.pbtxt | 15
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ParameterizedTruncatedNormal.pbtxt | 66
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ParseExample.pbtxt | 78
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ParseSingleSequenceExample.pbtxt | 112
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ParseTensor.pbtxt | 23
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Placeholder.pbtxt | 28
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_PlaceholderV2.pbtxt | 28
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_PlaceholderWithDefault.pbtxt | 28
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Polygamma.pbtxt | 12
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_PopulationCount.pbtxt | 12
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Pow.pbtxt | 14
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_PrefetchDataset.pbtxt | 11
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_PreventGradient.pbtxt | 32
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Print.pbtxt | 43
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_PriorityQueue.pbtxt | 54
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_PriorityQueueV2.pbtxt | 56
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Prod.pbtxt | 42
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_PyFunc.pbtxt | 40
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_PyFuncStateless.pbtxt | 5
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Q.pbtxt | 609
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Qr.pbtxt | 45
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_QuantizeAndDequantize.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_QuantizeAndDequantizeV2.pbtxt | 93
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_QuantizeAndDequantizeV3.pbtxt | 8
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_QuantizeDownAndShrinkRange.pbtxt | 64
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_QuantizeV2.pbtxt | 128
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_QuantizedAdd.pbtxt | 43
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_QuantizedAvgPool.pbtxt | 54
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_QuantizedBatchNormWithGlobalNormalization.pbtxt | 118
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_QuantizedBiasAdd.pbtxt | 49
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_QuantizedConcat.pbtxt | 50
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_QuantizedConv2D.pbtxt | 65
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_QuantizedInstanceNorm.pbtxt | 72
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_QuantizedMatMul.pbtxt | 77
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_QuantizedMaxPool.pbtxt | 54
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_QuantizedMul.pbtxt | 43
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_QuantizedRelu.pbtxt | 34
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_QuantizedRelu6.pbtxt | 34
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_QuantizedReluX.pbtxt | 34
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_QuantizedReshape.pbtxt | 37
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_QuantizedResizeBilinear.pbtxt | 35
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_QueueClose.pbtxt | 25
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_QueueCloseV2.pbtxt | 27
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_QueueDequeue.pbtxt | 39
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_QueueDequeueMany.pbtxt | 52
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_QueueDequeueManyV2.pbtxt | 54
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_QueueDequeueUpTo.pbtxt | 56
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_QueueDequeueUpToV2.pbtxt | 58
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_QueueDequeueV2.pbtxt | 41
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_QueueEnqueue.pbtxt | 32
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_QueueEnqueueMany.pbtxt | 37
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_QueueEnqueueManyV2.pbtxt | 39
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_QueueEnqueueV2.pbtxt | 34
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_QueueIsClosed.pbtxt | 14
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_QueueIsClosedV2.pbtxt | 14
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_QueueSize.pbtxt | 17
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_QueueSizeV2.pbtxt | 19
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_R.pbtxt | 1392
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_RFFT.pbtxt | 40
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_RFFT2D.pbtxt | 42
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_RFFT3D.pbtxt | 42
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_RGBToHSV.pbtxt | 25
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_RandomCrop.pbtxt | 44
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_RandomGamma.pbtxt | 45
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_RandomPoisson.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_RandomPoissonV2.pbtxt | 51
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_RandomShuffle.pbtxt | 42
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_RandomShuffleQueue.pbtxt | 68
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_RandomShuffleQueueV2.pbtxt | 70
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_RandomStandardNormal.pbtxt | 42
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_RandomUniform.pbtxt | 40
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_RandomUniformInt.pbtxt | 51
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Range.pbtxt | 41
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_RangeDataset.pbtxt | 22
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Rank.pbtxt | 19
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ReadFile.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ReadVariableOp.pbtxt | 24
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ReaderNumRecordsProduced.pbtxt | 15
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ReaderNumRecordsProducedV2.pbtxt | 17
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ReaderNumWorkUnitsCompleted.pbtxt | 11
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ReaderNumWorkUnitsCompletedV2.pbtxt | 13
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ReaderRead.pbtxt | 34
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ReaderReadUpTo.pbtxt | 41
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ReaderReadUpToV2.pbtxt | 43
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ReaderReadV2.pbtxt | 36
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ReaderReset.pbtxt | 11
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ReaderResetV2.pbtxt | 13
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ReaderRestoreState.pbtxt | 22
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ReaderRestoreStateV2.pbtxt | 24
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ReaderSerializeState.pbtxt | 15
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ReaderSerializeStateV2.pbtxt | 17
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Real.pbtxt | 17
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_RealDiv.pbtxt | 10
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Reciprocal.pbtxt | 7
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ReciprocalGrad.pbtxt | 9
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_RecordInput.pbtxt | 47
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ReduceJoin.pbtxt | 59
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_RefEnter.pbtxt | 41
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_RefExit.pbtxt | 20
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_RefIdentity.pbtxt | 5
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_RefMerge.pbtxt | 30
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_RefNextIteration.pbtxt | 16
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_RefSelect.pbtxt | 22
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_RefSwitch.pbtxt | 34
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Relu.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Relu6.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Relu6Grad.pbtxt | 25
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ReluGrad.pbtxt | 24
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_RemoteCall.pbtxt | 40
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_RemoteFusedGraphExecute.pbtxt | 32
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_RepeatDataset.pbtxt | 11
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_RequantizationRange.pbtxt | 39
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Requantize.pbtxt | 60
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Reshape.pbtxt | 68
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ResizeArea.pbtxt | 40
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ResizeBicubic.pbtxt | 35
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ResizeBicubicGrad.pbtxt | 34
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ResizeBilinear.pbtxt | 35
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ResizeBilinearGrad.pbtxt | 34
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ResizeNearestNeighbor.pbtxt | 32
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ResizeNearestNeighborGrad.pbtxt | 33
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ResourceApplyAdadelta.pbtxt | 59
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ResourceApplyAdagrad.pbtxt | 40
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ResourceApplyAdagradDA.pbtxt | 59
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ResourceApplyAdam.pbtxt | 84
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ResourceApplyCenteredRMSProp.pbtxt | 80
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ResourceApplyFtrl.pbtxt | 67
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ResourceApplyFtrlV2.pbtxt | 69
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ResourceApplyGradientDescent.pbtxt | 29
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ResourceApplyMomentum.pbtxt | 56
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ResourceApplyProximalAdagrad.pbtxt | 52
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ResourceApplyProximalGradientDescent.pbtxt | 45
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ResourceApplyRMSProp.pbtxt | 66
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ResourceCountUpTo.pbtxt | 24
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ResourceGather.pbtxt | 19
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ResourceScatterAdd.pbtxt | 43
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ResourceScatterUpdate.pbtxt | 34
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ResourceSparseApplyAdadelta.pbtxt | 53
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ResourceSparseApplyAdagrad.pbtxt | 47
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ResourceSparseApplyAdagradDA.pbtxt | 65
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ResourceSparseApplyCenteredRMSProp.pbtxt | 84
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ResourceSparseApplyFtrl.pbtxt | 74
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ResourceSparseApplyFtrlV2.pbtxt | 76
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ResourceSparseApplyMomentum.pbtxt | 64
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ResourceSparseApplyProximalAdagrad.pbtxt | 60
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ResourceSparseApplyProximalGradientDescent.pbtxt | 52
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ResourceSparseApplyRMSProp.pbtxt | 72
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ResourceStridedSliceAssign.pbtxt | 12
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Restore.pbtxt | 55
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_RestoreSlice.pbtxt | 52
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_RestoreV2.pbtxt | 52
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Reverse.pbtxt | 69
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ReverseSequence.pbtxt | 91
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ReverseV2.pbtxt | 74
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_RightShift.pbtxt | 11
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Rint.pbtxt | 15
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Round.pbtxt | 8
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Rsqrt.pbtxt | 7
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_RsqrtGrad.pbtxt | 9
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_S.pbtxt | 2678
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SampleDistortedBoundingBox.pbtxt | 131
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SampleDistortedBoundingBoxV2.pbtxt | 131
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Save.pbtxt | 29
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SaveSlices.pbtxt | 53
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SaveV2.pbtxt | 35
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ScalarSummary.pbtxt | 26
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ScanDataset.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ScatterAdd.pbtxt | 60
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ScatterDiv.pbtxt | 58
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ScatterMul.pbtxt | 58
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ScatterNd.pbtxt | 102
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ScatterNdAdd.pbtxt | 74
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ScatterNdNonAliasingAdd.pbtxt | 68
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ScatterNdSub.pbtxt | 74
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ScatterNdUpdate.pbtxt | 76
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ScatterSub.pbtxt | 60
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ScatterUpdate.pbtxt | 63
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SdcaFprint.pbtxt | 17
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SdcaOptimizer.pbtxt | 167
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SdcaShrinkL1.pbtxt | 29
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SegmentMax.pbtxt | 32
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SegmentMean.pbtxt | 33
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SegmentMin.pbtxt | 32
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SegmentProd.pbtxt | 32
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SegmentSum.pbtxt | 32
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Select.pbtxt | 69
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SelfAdjointEig.pbtxt | 24
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SelfAdjointEigV2.pbtxt | 44
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Selu.pbtxt | 9
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SeluGrad.pbtxt | 24
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SerializeIterator.pbtxt | 17
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SerializeManySparse.pbtxt | 31
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SerializeSparse.pbtxt | 22
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SerializeTensor.pbtxt | 22
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SetSize.pbtxt | 38
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Shape.pbtxt | 14
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ShapeN.pbtxt | 7
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ShardedFilename.pbtxt | 7
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ShardedFilespec.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ShuffleDataset.pbtxt | 36
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Sigmoid.pbtxt | 7
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SigmoidGrad.pbtxt | 9
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Sign.pbtxt | 9
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Sin.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Sinh.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Size.pbtxt | 15
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SkipDataset.pbtxt | 11
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Skipgram.pbtxt | 78
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Slice.pbtxt | 28
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Softmax.pbtxt | 21
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SoftmaxCrossEntropyWithLogits.pbtxt | 33
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Softplus.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SoftplusGrad.pbtxt | 23
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Softsign.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SoftsignGrad.pbtxt | 23
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SpaceToBatch.pbtxt | 109
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SpaceToBatchND.pbtxt | 140
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SpaceToDepth.pbtxt | 95
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseAccumulatorApplyGradient.pbtxt | 55
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseAccumulatorTakeGradient.pbtxt | 49
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseAdd.pbtxt | 62
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseAddGrad.pbtxt | 50
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseApplyAdadelta.pbtxt | 59
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseApplyAdagrad.pbtxt | 53
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseApplyAdagradDA.pbtxt | 71
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseApplyCenteredRMSProp.pbtxt | 90
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseApplyFtrl.pbtxt | 80
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseApplyFtrlV2.pbtxt | 82
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseApplyMomentum.pbtxt | 70
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseApplyProximalAdagrad.pbtxt | 66
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseApplyProximalGradientDescent.pbtxt | 58
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseApplyRMSProp.pbtxt | 78
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseConcat.pbtxt | 90
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseConditionalAccumulator.pbtxt | 44
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseCross.pbtxt | 106
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseDenseCwiseAdd.pbtxt | 45
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseDenseCwiseDiv.pbtxt | 39
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseDenseCwiseMul.pbtxt | 43
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseFillEmptyRows.pbtxt | 87
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseFillEmptyRowsGrad.pbtxt | 38
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseMatMul.pbtxt | 13
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseReduceMax.pbtxt | 55
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseReduceMaxSparse.pbtxt | 49
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseReduceSum.pbtxt | 55
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseReduceSumSparse.pbtxt | 49
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseReorder.pbtxt | 46
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseReshape.pbtxt | 55
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseSegmentMean.pbtxt | 30
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseSegmentMeanGrad.pbtxt | 32
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseSegmentSqrtN.pbtxt | 29
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseSegmentSqrtNGrad.pbtxt | 32
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseSegmentSum.pbtxt | 53
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseSlice.pbtxt | 67
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseSoftmax.pbtxt | 46
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseSoftmaxCrossEntropyWithLogits.pbtxt | 37
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseSparseMaximum.pbtxt | 56
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseSparseMinimum.pbtxt | 56
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseSplit.pbtxt | 70
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseTensorDenseAdd.pbtxt | 31
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseTensorDenseMatMul.pbtxt | 53
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseTensorSliceDataset.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseToDense.pbtxt | 65
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SparseToSparseSetOperation.pbtxt | 93
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Split.pbtxt | 33
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SplitV.pbtxt | 34
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SqlDataset.pbtxt | 22
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Sqrt.pbtxt | 7
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SqrtGrad.pbtxt | 9
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Square.pbtxt | 7
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SquaredDifference.pbtxt | 8
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Squeeze.pbtxt | 46
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Stack.pbtxt | 5
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_StackClose.pbtxt | 5
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_StackCloseV2.pbtxt | 11
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_StackPop.pbtxt | 5
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_StackPopV2.pbtxt | 23
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_StackPush.pbtxt | 5
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_StackPushV2.pbtxt | 29
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_StackV2.pbtxt | 31
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Stage.pbtxt | 42
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_StageClear.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_StagePeek.pbtxt | 9
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_StageSize.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_StatelessRandomNormal.pbtxt | 33
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_StatelessRandomUniform.pbtxt | 34
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_StatelessTruncatedNormal.pbtxt | 35
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_StopGradient.pbtxt | 25
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_StridedSlice.pbtxt | 167
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_StridedSliceAssign.pbtxt | 12
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_StridedSliceGrad.pbtxt | 14
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_StringJoin.pbtxt | 21
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_StringSplit.pbtxt | 64
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_StringToHashBucket.pbtxt | 24
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_StringToHashBucketFast.pbtxt | 30
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_StringToHashBucketStrong.pbtxt | 41
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_StringToNumber.pbtxt | 20
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Sub.pbtxt | 14
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Substr.pbtxt | 103
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Sum.pbtxt | 42
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Svd.pbtxt | 62
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Switch.pbtxt | 34
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_SymbolicGradient.pbtxt | 51
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_T.pbtxt | 619
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TFRecordDataset.pbtxt | 25
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TFRecordReader.pbtxt | 25
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TFRecordReaderV2.pbtxt | 27
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TakeDataset.pbtxt | 12
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TakeManySparseFromTensorsMap.pbtxt | 100
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Tan.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Tanh.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TanhGrad.pbtxt | 9
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TemporaryVariable.pbtxt | 45
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TensorArray.pbtxt | 3
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TensorArrayClose.pbtxt | 3
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TensorArrayCloseV2.pbtxt | 5
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TensorArrayCloseV3.pbtxt | 17
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TensorArrayConcat.pbtxt | 3
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TensorArrayConcatV2.pbtxt | 5
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TensorArrayConcatV3.pbtxt | 62
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TensorArrayGather.pbtxt | 3
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TensorArrayGatherV2.pbtxt | 5
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TensorArrayGatherV3.pbtxt | 49
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TensorArrayGrad.pbtxt | 3
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TensorArrayGradV2.pbtxt | 5
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TensorArrayGradV3.pbtxt | 64
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TensorArrayPack.pbtxt | 3
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TensorArrayRead.pbtxt | 3
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TensorArrayReadV2.pbtxt | 5
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TensorArrayReadV3.pbtxt | 31
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TensorArrayScatter.pbtxt | 3
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TensorArrayScatterV2.pbtxt | 5
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TensorArrayScatterV3.pbtxt | 40
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TensorArraySize.pbtxt | 3
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TensorArraySizeV2.pbtxt | 5
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TensorArraySizeV3.pbtxt | 25
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TensorArraySplit.pbtxt | 3
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TensorArraySplitV2.pbtxt | 5
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TensorArraySplitV3.pbtxt | 57
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TensorArrayUnpack.pbtxt | 3
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TensorArrayV2.pbtxt | 5
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TensorArrayV3.pbtxt | 65
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TensorArrayWrite.pbtxt | 3
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TensorArrayWriteV2.pbtxt | 5
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TensorArrayWriteV3.pbtxt | 37
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TensorDataset.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TensorSliceDataset.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TensorSummary.pbtxt | 33
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TensorSummaryV2.pbtxt | 23
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TextLineDataset.pbtxt | 24
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TextLineReader.pbtxt | 31
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TextLineReaderV2.pbtxt | 33
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ThreadUnsafeUnigramCandidateSampler.pbtxt | 87
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Tile.pbtxt | 23
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TileGrad.pbtxt | 9
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TopK.pbtxt | 50
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TopKV2.pbtxt | 51
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Transpose.pbtxt | 8
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TruncateDiv.pbtxt | 13
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TruncateMod.pbtxt | 11
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_TruncatedNormal.pbtxt | 42
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_U.pbtxt | 150
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_UniformCandidateSampler.pbtxt | 86
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Unique.pbtxt | 39
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_UniqueWithCounts.pbtxt | 47
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Unpack.pbtxt | 40
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_UnsortedSegmentMax.pbtxt | 36
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_UnsortedSegmentSum.pbtxt | 36
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Unstage.pbtxt | 8
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_V.pbtxt | 19
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_VarHandleOp.pbtxt | 29
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_VarIsInitializedOp.pbtxt | 17
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Variable.pbtxt | 5
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_VariableShape.pbtxt | 14
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_VariableV2.pbtxt | 44
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Where.pbtxt (renamed from tensorflow/core/api_def/base_api/api_def_W.pbtxt) | 67
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_WholeFileReader.pbtxt | 29
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_WholeFileReaderV2.pbtxt | 31
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_WriteFile.pbtxt | 19
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Z.pbtxt | 27
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ZerosLike.pbtxt | 16
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_Zeta.pbtxt | 10
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ZipDataset.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/python_api/api_def_A.pbtxt | 56
-rw-r--r--  tensorflow/core/api_def/python_api/api_def_Abs.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/python_api/api_def_AccumulateNV2.pbtxt | 4
-rw-r--r--  tensorflow/core/api_def/python_api/api_def_AddManySparseToTensorsMap.pbtxt | 4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_AddN.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_AddSparseToTensorsMap.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_AddV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_AdjustContrastv2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_All.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_AllCandidateSampler.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Any.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Assert.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_AudioSummary.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_AudioSummaryV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_AvgPool.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_AvgPool3DGrad.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_B.pbtxt142
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Barrier.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_BarrierClose.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_BarrierIncompleteSize.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_BarrierInsertMany.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_BarrierReadySize.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_BarrierTakeMany.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_BatchCholesky.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_BatchCholeskyGrad.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_BatchFFT.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_BatchFFT2D.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_BatchFFT3D.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_BatchIFFT.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_BatchIFFT2D.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_BatchIFFT3D.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_BatchMatMul.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_BatchMatrixDeterminant.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_BatchMatrixInverse.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_BatchMatrixSolve.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_BatchMatrixSolveLs.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_BatchMatrixTriangularSolve.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_BatchNormWithGlobalNormalization.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_BatchNormWithGlobalNormalizationGrad.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_BatchSelfAdjointEig.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_BatchSelfAdjointEigV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_BatchSvd.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_BatchToSpace.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_BiasAdd.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_BiasAddV1.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_BitwiseAnd.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_BitwiseOr.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_BitwiseXor.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_BroadcastArgs.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Bucketize.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_C.pbtxt59
-rw-r--r--tensorflow/core/api_def/python_api/api_def_CTCBeamSearchDecoder.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_CTCGreedyDecoder.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_CTCLoss.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Cholesky.pbtxt9
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Complex.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ComplexAbs.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ComputeAccidentalHits.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Concat.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ConcatOffset.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ConcatV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Conj.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ConjugateTranspose.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Const.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_CropAndResize.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_D.pbtxt74
-rw-r--r--tensorflow/core/api_def/python_api/api_def_DebugGradientIdentity.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_DecodeAndCropJpeg.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_DecodeBmp.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_DecodeCSV.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_DecodeGif.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_DecodeJpeg.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_DecodePng.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_DeleteSessionTensor.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_DepthwiseConv2dNative.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_DepthwiseConv2dNativeBackpropFilter.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_DepthwiseConv2dNativeBackpropInput.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_DeserializeManySparse.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_DestroyTemporaryVariable.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_DrawBoundingBoxes.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_E.pbtxt46
-rw-r--r--tensorflow/core/api_def/python_api/api_def_EditDistance.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Elu.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_EncodeJpeg.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_EncodePng.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ExpandDims.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ExtractGlimpse.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ExtractJpegShape.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_F.pbtxt73
-rw-r--r--tensorflow/core/api_def/python_api/api_def_FFT.pbtxt9
-rw-r--r--tensorflow/core/api_def/python_api/api_def_FIFOQueue.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_FIFOQueueV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Fact.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_FakeQueue.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_FixedLengthRecordReader.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_FixedLengthRecordReaderV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_FixedUnigramCandidateSampler.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_FloorDiv.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_FloorMod.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_FractionalAvgPool.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_FractionalMaxPool.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_FusedBatchNorm.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_FusedBatchNormV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_G.pbtxt16
-rw-r--r--tensorflow/core/api_def/python_api/api_def_GenerateVocabRemapping.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_GetSessionHandle.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_GetSessionHandleV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_GetSessionTensor.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_H.pbtxt18
-rw-r--r--tensorflow/core/api_def/python_api/api_def_HSVToRGB.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_HashTable.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_HashTableV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_HistogramFixedWidth.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_HistogramSummary.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_I.pbtxt55
-rw-r--r--tensorflow/core/api_def/python_api/api_def_IFFT.pbtxt9
-rw-r--r--tensorflow/core/api_def/python_api/api_def_IdentityReader.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_IdentityReaderV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ImageSummary.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_InTopK.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_InTopKV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_InitializeTable.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_InitializeTableFromTextFile.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_InitializeTableFromTextFileV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_InitializeTableV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Invert.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_L.pbtxt96
-rw-r--r--tensorflow/core/api_def/python_api/api_def_L2Loss.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_LMDBReader.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_LRN.pbtxt9
-rw-r--r--tensorflow/core/api_def/python_api/api_def_LearnedUnigramCandidateSampler.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_LeftShift.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_LinSpace.pbtxt9
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ListDiff.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_LoadAndRemapMatrix.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_LogMatrixDeterminant.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_LogSoftmax.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_LogUniformCandidateSampler.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_LookupTableExport.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_LookupTableExportV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_LookupTableFind.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_LookupTableFindV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_LookupTableImport.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_LookupTableImportV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_LookupTableInsert.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_LookupTableInsertV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_LookupTableSize.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_LookupTableSizeV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_M.pbtxt174
-rw-r--r--tensorflow/core/api_def/python_api/api_def_MatMul.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_MatrixBandPart.pbtxt9
-rw-r--r--tensorflow/core/api_def/python_api/api_def_MatrixDeterminant.pbtxt9
-rw-r--r--tensorflow/core/api_def/python_api/api_def_MatrixDiag.pbtxt9
-rw-r--r--tensorflow/core/api_def/python_api/api_def_MatrixDiagPart.pbtxt9
-rw-r--r--tensorflow/core/api_def/python_api/api_def_MatrixInverse.pbtxt9
-rw-r--r--tensorflow/core/api_def/python_api/api_def_MatrixSetDiag.pbtxt9
-rw-r--r--tensorflow/core/api_def/python_api/api_def_MatrixSolve.pbtxt9
-rw-r--r--tensorflow/core/api_def/python_api/api_def_MatrixSolveLs.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_MatrixTriangularSolve.pbtxt9
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Max.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_MaxPool.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_MaxPool3DGrad.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_MaxPool3DGradGrad.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_MaxPoolGradGrad.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_MaxPoolGradGradWithArgmax.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_MaxPoolV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_MaxPoolWithArgmax.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Mean.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Merge.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_MergeSummary.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Min.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_MirrorPad.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Mul.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_MutableDenseHashTable.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_MutableDenseHashTableV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_MutableHashTable.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_MutableHashTableOfTensors.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_MutableHashTableOfTensorsV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_MutableHashTableV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_N.pbtxt16
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Neg.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_NegTrain.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_NonMaxSuppression.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_NonMaxSuppressionV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_OneHot.pbtxt (renamed from tensorflow/core/api_def/python_api/api_def_O.pbtxt)0
-rw-r--r--tensorflow/core/api_def/python_api/api_def_P.pbtxt68
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Pack.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Pad.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_PadV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_PaddingFIFOQueue.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_PaddingFIFOQueueV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ParallelConcat.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ParameterizedTruncatedNormal.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ParseExample.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ParseSingleSequenceExample.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Placeholder.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Pow.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Print.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_PriorityQueue.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_PriorityQueueV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Prod.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_PyFunc.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_PyFuncStateless.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Q.pbtxt83
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Qr.pbtxt9
-rw-r--r--tensorflow/core/api_def/python_api/api_def_QuantizedAvgPool.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_QuantizedMaxPool.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_QuantizedReluX.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_QueueClose.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_QueueCloseV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_QueueDequeue.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_QueueDequeueMany.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_QueueDequeueManyV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_QueueDequeueUpTo.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_QueueDequeueUpToV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_QueueDequeueV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_QueueEnqueue.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_QueueEnqueueMany.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_QueueEnqueueManyV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_QueueEnqueueV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_QueueSize.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_QueueSizeV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_R.pbtxt192
-rw-r--r--tensorflow/core/api_def/python_api/api_def_RGBToHSV.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_RandomCrop.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_RandomGamma.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_RandomPoisson.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_RandomShuffle.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_RandomShuffleQueue.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_RandomShuffleQueueV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_RandomStandardNormal.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_RandomUniform.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_RandomUniformInt.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Range.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ReaderNumRecordsProduced.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ReaderNumRecordsProducedV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ReaderNumWorkUnitsCompleted.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ReaderNumWorkUnitsCompletedV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ReaderRead.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ReaderReadUpTo.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ReaderReadUpToV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ReaderReadV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ReaderReset.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ReaderResetV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ReaderRestoreState.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ReaderRestoreStateV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ReaderSerializeState.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ReaderSerializeStateV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_RealDiv.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Relu.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Relu6.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ResizeArea.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ResizeBicubic.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ResizeBilinear.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ResizeNearestNeighbor.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Restore.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_RestoreSlice.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Reverse.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ReverseV2.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_RightShift.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_S.pbtxt252
-rw-r--r--tensorflow/core/api_def/python_api/api_def_SampleDistortedBoundingBox.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_SampleDistortedBoundingBoxV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Save.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_SaveSlices.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ScalarSummary.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_SdcaFprint.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_SdcaOptimizer.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_SdcaShrinkL1.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Select.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_SelfAdjointEig.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_SelfAdjointEigV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Selu.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_SerializeManySparse.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_SerializeSparse.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ShardedFilename.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ShardedFilespec.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Sigmoid.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Skipgram.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Slice.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Softmax.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_SoftmaxCrossEntropyWithLogits.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Softplus.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Softsign.pbtxt6
-rw-r--r--tensorflow/core/api_def/python_api/api_def_SpaceToBatch.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_SparseAdd.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_SparseAddGrad.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_SparseConcat.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_SparseCross.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_SparseFillEmptyRows.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_SparseFillEmptyRowsGrad.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_SparseMatMul.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_SparseReorder.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_SparseReshape.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_SparseSoftmaxCrossEntropyWithLogits.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_SparseSplit.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_SparseTensorDenseAdd.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_SparseTensorDenseMatMul.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_SparseToDense.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Split.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_SplitV.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Squeeze.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Stack.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_StackClose.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_StackCloseV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_StackPop.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_StackPopV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_StackPush.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_StackPushV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_StackV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_StringSplit.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Sub.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Sum.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Svd.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Switch.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_SymbolicGradient.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_T.pbtxt196
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TFRecordReader.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TFRecordReaderV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TakeManySparseFromTensorsMap.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Tanh.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TemporaryVariable.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TensorArray.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TensorArrayClose.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TensorArrayCloseV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TensorArrayCloseV3.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TensorArrayConcat.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TensorArrayConcatV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TensorArrayConcatV3.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TensorArrayGather.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TensorArrayGatherV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TensorArrayGatherV3.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TensorArrayGrad.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TensorArrayGradV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TensorArrayGradV3.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TensorArrayPack.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TensorArrayRead.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TensorArrayReadV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TensorArrayReadV3.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TensorArrayScatter.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TensorArrayScatterV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TensorArrayScatterV3.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TensorArraySize.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TensorArraySizeV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TensorArraySizeV3.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TensorArraySplit.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TensorArraySplitV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TensorArraySplitV3.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TensorArrayUnpack.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TensorArrayV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TensorArrayV3.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TensorArrayWrite.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TensorArrayWriteV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TensorArrayWriteV3.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TensorSummary.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TensorSummaryV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TextLineReader.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TextLineReaderV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ThreadUnsafeUnigramCandidateSampler.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TileGrad.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TopK.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TopKV2.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TruncateDiv.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TruncateMod.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_TruncatedNormal.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_UniformCandidateSampler.pbtxt (renamed from tensorflow/core/api_def/python_api/api_def_U.pbtxt)4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Unpack.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_Variable.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_VariableV2.pbtxt (renamed from tensorflow/core/api_def/python_api/api_def_V.pbtxt)4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_WholeFileReader.pbtxt4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_WholeFileReaderV2.pbtxt (renamed from tensorflow/core/api_def/python_api/api_def_W.pbtxt)4
-rw-r--r--tensorflow/core/api_def/python_api/api_def_ZerosLike.pbtxt (renamed from tensorflow/core/api_def/python_api/api_def_Z.pbtxt)0
-rw-r--r--tensorflow/core/common_runtime/gpu/gpu_device.cc32
-rw-r--r--tensorflow/core/framework/shape_inference.h1
-rw-r--r--tensorflow/core/graph/graph_constructor.cc121
-rw-r--r--tensorflow/core/graph/graph_constructor.h11
-rw-r--r--tensorflow/core/graph/graph_constructor_test.cc134
-rw-r--r--tensorflow/core/grappler/costs/graph_properties.cc19
-rw-r--r--tensorflow/core/grappler/costs/graph_properties.h14
-rw-r--r--tensorflow/core/grappler/costs/graph_properties_test.cc3
-rw-r--r--tensorflow/core/grappler/costs/op_level_cost_estimator.cc2
-rw-r--r--tensorflow/core/grappler/optimizers/arithmetic_optimizer.cc141
-rw-r--r--tensorflow/core/grappler/optimizers/arithmetic_optimizer_test.cc76
-rw-r--r--tensorflow/core/grappler/optimizers/constant_folding.cc4
-rw-r--r--tensorflow/core/grappler/optimizers/layout_optimizer.cc130
-rw-r--r--tensorflow/core/grappler/optimizers/layout_optimizer.h16
-rw-r--r--tensorflow/core/grappler/optimizers/layout_optimizer_test.cc28
-rw-r--r--tensorflow/core/grappler/optimizers/meta_optimizer.cc62
-rw-r--r--tensorflow/core/grappler/optimizers/meta_optimizer.h3
-rw-r--r--tensorflow/core/grappler/utils.cc5
-rw-r--r--tensorflow/core/kernels/BUILD7
-rw-r--r--tensorflow/core/kernels/dataset.h25
-rw-r--r--tensorflow/core/kernels/generate_vocab_remapping_op.cc21
-rw-r--r--tensorflow/core/kernels/iterator_ops.cc6
-rw-r--r--tensorflow/core/kernels/matrix_exponential_op.cc59
-rw-r--r--tensorflow/core/kernels/prefetch_dataset_op.cc23
-rw-r--r--tensorflow/core/kernels/tensor_array.h34
-rw-r--r--tensorflow/core/ops/checkpoint_ops.cc9
-rw-r--r--tensorflow/core/ops/compat/ops_history.v1.pbtxt38
-rw-r--r--tensorflow/core/ops/linalg_ops.cc27
-rw-r--r--tensorflow/core/ops/ops.pbtxt12
-rw-r--r--tensorflow/go/op/wrappers.go27
-rw-r--r--tensorflow/python/BUILD28
-rw-r--r--tensorflow/python/eager/BUILD16
-rw-r--r--tensorflow/python/eager/benchmarks_test.py411
-rw-r--r--tensorflow/python/eager/pywrap_tensor.cc2
-rw-r--r--tensorflow/python/estimator/canned/head.py55
-rw-r--r--tensorflow/python/estimator/canned/head_test.py160
-rw-r--r--tensorflow/python/estimator/warm_starting_util.py43
-rw-r--r--tensorflow/python/estimator/warm_starting_util_test.py71
-rw-r--r--tensorflow/python/framework/c_api_util.py31
-rw-r--r--tensorflow/python/framework/graph_util_impl.py89
-rw-r--r--tensorflow/python/framework/importer.py33
-rw-r--r--tensorflow/python/framework/importer_test.py111
-rw-r--r--tensorflow/python/framework/ops.py6
-rw-r--r--tensorflow/python/grappler/layout_optimizer_test.py32
-rw-r--r--tensorflow/python/grappler/tf_optimizer.i5
-rw-r--r--tensorflow/python/keras/BUILD14
-rw-r--r--tensorflow/python/keras/__init__.py2
-rw-r--r--tensorflow/python/keras/_impl/keras/__init__.py2
-rw-r--r--tensorflow/python/keras/_impl/keras/applications/__init__.py1
-rw-r--r--tensorflow/python/keras/_impl/keras/applications/imagenet_utils.py21
-rw-r--r--tensorflow/python/keras/_impl/keras/applications/inception_resnet_v2.py369
-rw-r--r--tensorflow/python/keras/_impl/keras/applications/inception_resnet_v2_test.py59
-rw-r--r--tensorflow/python/keras/_impl/keras/applications/inception_v3.py18
-rw-r--r--tensorflow/python/keras/_impl/keras/applications/mobilenet.py14
-rw-r--r--tensorflow/python/keras/_impl/keras/applications/resnet50.py4
-rw-r--r--tensorflow/python/keras/_impl/keras/applications/vgg16.py6
-rw-r--r--tensorflow/python/keras/_impl/keras/applications/vgg19.py6
-rw-r--r--tensorflow/python/keras/_impl/keras/applications/xception.py20
-rw-r--r--tensorflow/python/keras/_impl/keras/backend.py11
-rw-r--r--tensorflow/python/keras/applications/__init__.py2
-rw-r--r--tensorflow/python/keras/applications/inception_resnet_v2/__init__.py27
-rw-r--r--tensorflow/python/kernel_tests/BUILD26
-rw-r--r--tensorflow/python/kernel_tests/checkpoint_ops_test.py15
-rw-r--r--tensorflow/python/kernel_tests/depthtospace_op_test.py31
-rw-r--r--tensorflow/python/kernel_tests/distributions/special_math_test.py16
-rw-r--r--tensorflow/python/kernel_tests/garbage_collection_test.py88
-rw-r--r--tensorflow/python/kernel_tests/matrix_exponential_op_test.py196
-rw-r--r--tensorflow/python/kernel_tests/spacetodepth_op_test.py27
-rw-r--r--tensorflow/python/kernel_tests/tensor_array_ops_test.py26
-rw-r--r--tensorflow/python/ops/array_grad.py12
-rw-r--r--tensorflow/python/ops/distributions/special_math.py6
-rw-r--r--tensorflow/python/ops/hidden_ops.txt1
-rw-r--r--tensorflow/python/ops/linalg/linalg_impl.py1
-rw-r--r--tensorflow/python/ops/math_ops.py1
-rw-r--r--tensorflow/python/ops/resource_variable_ops.py22
-rw-r--r--tensorflow/python/ops/tensor_array_ops.py53
-rw-r--r--tensorflow/python/platform/app.py103
-rw-r--r--tensorflow/python/platform/flags.py195
-rw-r--r--tensorflow/python/platform/flags_test.py97
-rw-r--r--tensorflow/python/tools/BUILD1
-rw-r--r--tensorflow/python/training/checkpoint_ops.py25
-rw-r--r--tensorflow/python/training/checkpoint_ops_test.py81
-rw-r--r--tensorflow/python/training/moving_averages.py7
-rw-r--r--tensorflow/python/training/moving_averages_test.py27
-rw-r--r--tensorflow/python/util/tf_should_use.py14
-rw-r--r--tensorflow/tensorflow.bzl22
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.-model.pbtxt269
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.-sequential.pbtxt294
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.applications.inception_resnet_v2.pbtxt15
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.applications.pbtxt8
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.applications.resnet50.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.applications.vgg16.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.applications.vgg19.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.pbtxt8
-rw-r--r--tensorflow/tools/api/golden/tensorflow.linalg.pbtxt4
-rw-r--r--tensorflow/tools/api/tests/api_compatibility_test.py76
-rwxr-xr-xtensorflow/tools/ci_build/install/install_pip_packages.sh4
-rwxr-xr-xtensorflow/tools/ci_build/install/install_python3.5_pip_packages.sh1
-rw-r--r--tensorflow/tools/ci_build/windows/cpu/cmake/run_py.bat3
-rw-r--r--tensorflow/tools/ci_build/windows/gpu/cmake/run_py.bat3
-rw-r--r--tensorflow/tools/docs/generate_lib.py31
-rw-r--r--tensorflow/tools/pip_package/setup.py1
-rw-r--r--tensorflow/workspace.bzl17
-rw-r--r--third_party/eigen.BUILD2
-rw-r--r--third_party/eigen3/BUILD1
-rw-r--r--third_party/eigen3/unsupported/Eigen/MatrixFunctions1
1305 files changed, 35474 insertions, 14134 deletions
diff --git a/configure.py b/configure.py
index 8572fa7fdb..6279c42610 100644
--- a/configure.py
+++ b/configure.py
@@ -25,10 +25,12 @@ import re
import subprocess
import sys
+# pylint: disable=g-import-not-at-top
try:
from shutil import which
except ImportError:
from distutils.spawn import find_executable as which
+# pylint: enable=g-import-not-at-top
_TF_BAZELRC = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'.tf_configure.bazelrc')
@@ -485,7 +487,10 @@ def set_cc_opt_flags(environ_cp):
cc_opt_flags = get_from_env_or_user_or_default(environ_cp, 'CC_OPT_FLAGS',
question, default_cc_opt_flags)
for opt in cc_opt_flags.split():
- write_to_bazelrc('build:opt --cxxopt=%s --copt=%s' % (opt, opt))
+ host_opt = '-march=native' # It should be safe on the same build host.
+ write_to_bazelrc(
+ 'build:opt --cxxopt=%s --copt=%s' % (opt, opt) +
+ ' --host_cxxopt=%s --host_copt=%s' % (host_opt, host_opt))
def set_tf_cuda_clang(environ_cp):
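
For a single flag this loop emits one line into .tf_configure.bazelrc; with CC_OPT_FLAGS set to -march=native the generated line would look like the following (a sketch of the generated output, not part of the patch itself):

  build:opt --cxxopt=-march=native --copt=-march=native --host_cxxopt=-march=native --host_copt=-march=native

The two new --host_* options make tools compiled for the build host during the build pick up the same native optimization, which is safe because those tools run on the machine that built them.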
diff --git a/tensorflow/compiler/tf2xla/functionalize_control_flow.cc b/tensorflow/compiler/tf2xla/functionalize_control_flow.cc
index 893175373f..6ef4860f35 100644
--- a/tensorflow/compiler/tf2xla/functionalize_control_flow.cc
+++ b/tensorflow/compiler/tf2xla/functionalize_control_flow.cc
@@ -130,7 +130,9 @@ Status CopySubgraph(const Graph& graph, const Frame* frame,
stack.push_back(src);
}
Node* src_copy = (*node_map)[e->src()->id()];
- int src_output = squash_src_outputs[e->src()->id()] ? 0 : e->src_output();
+ int src_output = squash_src_outputs[e->src()->id()] && !e->IsControlEdge()
+ ? 0
+ : e->src_output();
Node* dst_copy = (*node_map)[e->dst()->id()];
output->AddEdge(src_copy, src_output, dst_copy, e->dst_input());
}
diff --git a/tensorflow/compiler/tf2xla/kernels/gather_op.cc b/tensorflow/compiler/tf2xla/kernels/gather_op.cc
index 2c5d910d58..e420f21ca3 100644
--- a/tensorflow/compiler/tf2xla/kernels/gather_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/gather_op.cc
@@ -77,18 +77,6 @@ xla::ComputationDataHandle XlaComputeGatherDynamicSlice(
out_shape.dim_sizes());
}
- // Degenerate case: single slice.
- if (num_indices == 1) {
- auto index = builder->Reshape(indices, {1});
- auto start_index = builder->Pad(
- index, XlaHelpers::Zero(builder, index_type),
- xla::MakeEdgePaddingConfig(
- {{input_shape_pre_axis.dims(), input_shape_post_axis.dims()}}));
- auto slice =
- builder->DynamicSlice(input, start_index, slice_shape.dim_sizes());
- return builder->Reshape(slice, out_shape.dim_sizes());
- }
-
// Specify the shape of the loop-carried Tensor tuple.
xla::PrimitiveType ptype;
TF_CHECK_OK(DataTypeToPrimitiveType(dtype, &ptype));
diff --git a/tensorflow/compiler/xla/client/computation_builder.cc b/tensorflow/compiler/xla/client/computation_builder.cc
index 24774c4c2a..763d94e94c 100644
--- a/tensorflow/compiler/xla/client/computation_builder.cc
+++ b/tensorflow/compiler/xla/client/computation_builder.cc
@@ -1309,7 +1309,7 @@ Status ComputationBuilder::SetReturnValue(
}
StatusOr<bool> ComputationBuilder::IsConstant(
- const ComputationDataHandle& operand) {
+ const ComputationDataHandle& operand, int64 num_parameters) {
if (!first_error_.ok()) {
return first_error_;
}
@@ -1317,6 +1317,7 @@ StatusOr<bool> ComputationBuilder::IsConstant(
IsConstantRequest request;
*request.mutable_computation() = computation_.handle();
*request.mutable_operand() = operand;
+ request.set_num_parameters(num_parameters);
IsConstantResponse response;
VLOG(2) << "making IsConstant request";
@@ -1330,7 +1331,8 @@ StatusOr<bool> ComputationBuilder::IsConstant(
}
StatusOr<std::unique_ptr<Literal>> ComputationBuilder::ComputeConstant(
- const ComputationDataHandle& operand, const Layout* output_layout) {
+ const ComputationDataHandle& operand, const Layout* output_layout,
+ tensorflow::gtl::ArraySlice<Literal> parameters) {
if (!first_error_.ok()) {
return first_error_;
}
@@ -1341,6 +1343,9 @@ StatusOr<std::unique_ptr<Literal>> ComputationBuilder::ComputeConstant(
if (output_layout != nullptr) {
*request.mutable_output_layout() = *output_layout;
}
+ for (const auto& param : parameters) {
+ *request.add_parameters() = param.ToProto();
+ }
ComputeConstantResponse response;
diff --git a/tensorflow/compiler/xla/client/computation_builder.h b/tensorflow/compiler/xla/client/computation_builder.h
index bc7ad06a3f..8e1b4be1f3 100644
--- a/tensorflow/compiler/xla/client/computation_builder.h
+++ b/tensorflow/compiler/xla/client/computation_builder.h
@@ -746,11 +746,12 @@ class ComputationBuilder {
ComputationDataHandle Recv(const Shape& shape, const ChannelHandle& handle);
// Returns true if 'operand' is a compile-time constant. A compile-time
- // constant does not depend on parameters, or on stateful operators such
- // as `RngNormal` or `Infeed`. Unlike `ComputeConstant`, `IsConstant` tests
- // whether a computation is a compile-time constant without evaluating the
- // computation.
- StatusOr<bool> IsConstant(const ComputationDataHandle& operand);
+ // constant does not depend on any parameter with index `num_parameters`
+ // or higher, or on stateful operators such as `RngNormal` or `Infeed`.
+ // Unlike `ComputeConstant`, `IsConstant` tests whether a computation is a
+ // compile-time constant without evaluating the computation.
+ StatusOr<bool> IsConstant(const ComputationDataHandle& operand,
+ int64 num_parameters = 0);
// Normalizes operand across spatial and batch dimensions for each feature.
//
@@ -795,7 +796,7 @@ class ComputationBuilder {
float epsilon, int64 feature_index);
// Computes the value of a constant indicated by a
- // ComputationDataHandle.
+ // ComputationDataHandle using a non-optimized interpreter on the host.
//
// The operand must be from the computation currently being built -
// i.e., returned from this builder with no intervening call to
@@ -803,8 +804,11 @@ class ComputationBuilder {
// that may stop working at any time.
//
// The operand must represent a constant value, which in this case
- // means that it must not statically depend on a parameter to the
- // computation that is being built.
+ // means that it must not statically depend on any parameter of the
+ // computation that is being built other than the ones specified in the
+ // parameter list. The parameters in the list are indexed by their
+ // parameter id property, so the number of parameters specified must be
+ // greater than the largest parameter index used.
//
// `IsConstant` can be used to test whether a computation is a compile-time
// constant without evaluating it. `ComputeConstant` only succeeds for
@@ -822,7 +826,8 @@ class ComputationBuilder {
// will be stored using that layout.
StatusOr<std::unique_ptr<Literal>> ComputeConstant(
const ComputationDataHandle& operand,
- const Layout* output_layout = nullptr);
+ const Layout* output_layout = nullptr,
+ tensorflow::gtl::ArraySlice<Literal> parameters = {});
// Returns a new ComputationBuilder whose resultant Computation is used only
// by this ComputationBuilder. The sub-ComputationBuilder has the same
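
A minimal client-side sketch of the extended API. Only the IsConstant/ComputeConstant signatures come from this header; the builder calls, names, and literal construction are illustrative, and passing the literal by value assumes Literal is copyable:

  // Build p0 + 1, where p0 is parameter 0 of the computation.
  xla::ComputationBuilder b(client, "fold");
  auto p0 = b.Parameter(0, xla::ShapeUtil::MakeShape(xla::F32, {}), "p0");
  auto sum = b.Add(p0, b.ConstantR0<float>(1.0f));

  // With num_parameters = 1, parameter 0 is treated as bound, so `sum`
  // qualifies as a compile-time constant.
  xla::StatusOr<bool> is_constant = b.IsConstant(sum, /*num_parameters=*/1);

  // Bind parameter 0 to a literal and fold the expression on the host.
  std::unique_ptr<xla::Literal> arg = xla::Literal::CreateR0<float>(41.0f);
  xla::StatusOr<std::unique_ptr<xla::Literal>> result =
      b.ComputeConstant(sum, /*output_layout=*/nullptr, {*arg});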
diff --git a/tensorflow/compiler/xla/service/buffer_assignment.cc b/tensorflow/compiler/xla/service/buffer_assignment.cc
index 8536429846..b422b22df9 100644
--- a/tensorflow/compiler/xla/service/buffer_assignment.cc
+++ b/tensorflow/compiler/xla/service/buffer_assignment.cc
@@ -101,6 +101,11 @@ BufferAllocationProto BufferAllocation::ToProto() const {
proto_assigned->set_offset(buffer_offset_size.second.offset);
proto_assigned->set_size(buffer_offset_size.second.size);
}
+ std::sort(proto.mutable_assigned()->begin(), proto.mutable_assigned()->end(),
+ [](const BufferAllocationProto::Assigned& assign1,
+ const BufferAllocationProto::Assigned& assign2) {
+ return assign1.logical_buffer_id() < assign2.logical_buffer_id();
+ });
return proto;
}
diff --git a/tensorflow/compiler/xla/service/cpu/llvm_ir_runtime.cc b/tensorflow/compiler/xla/service/cpu/llvm_ir_runtime.cc
index b490472831..81c29e4726 100644
--- a/tensorflow/compiler/xla/service/cpu/llvm_ir_runtime.cc
+++ b/tensorflow/compiler/xla/service/cpu/llvm_ir_runtime.cc
@@ -52,7 +52,7 @@ llvm::Function* EmitVectorF32TanhIfNeeded(llvm::Module* module,
llvm::IRBuilder<> ir_builder(vector_tanh_body);
llvm::FastMathFlags fast_math_flags;
- fast_math_flags.setUnsafeAlgebra();
+ fast_math_flags.setFast();
ir_builder.setFastMathFlags(fast_math_flags);
llvm::Value* input = &*vector_tanh_function->arg_begin();
diff --git a/tensorflow/compiler/xla/service/executable.h b/tensorflow/compiler/xla/service/executable.h
index 2d32e59d36..7e0d182b36 100644
--- a/tensorflow/compiler/xla/service/executable.h
+++ b/tensorflow/compiler/xla/service/executable.h
@@ -88,6 +88,16 @@ class Executable {
tensorflow::gtl::ArraySlice<perftools::gputools::DeviceMemoryBase>>
arguments);
+ // Populates `hlo_execution_profile` from `executor`. This is implicit in any
+ // Execute* API call that takes a hlo_execution_profile argument, but must be
+ // called explicitly for other (async, for example) variants after the stream
+ // has completed.
+ virtual Status PopulateExecutionProfile(
+ HloExecutionProfile* hlo_execution_profile,
+ perftools::gputools::StreamExecutor* executor) {
+ return Status::OK();
+ }
+
// Convenience wrapper for calling Executable::ExecuteOnStream. Sets up a
// timer for the execution, sets up HLO profiling if enabled, and fills in the
// given ExecutionProfile if non-null. The ExecuteOnStream overloads have
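
For the async variants the caller now drives profiling explicitly. A sketch of the intended sequence, with stream setup, run_options, and arguments assumed to exist and error handling of the wait elided (ExecuteAsyncOnStream's use appears in the service.cc hunk further down):

  TF_ASSIGN_OR_RETURN(
      auto result, executable->ExecuteAsyncOnStream(&run_options, arguments));
  stream->BlockHostUntilDone();  // profile is only meaningful once the stream completes
  HloExecutionProfile hlo_profile;
  TF_RETURN_IF_ERROR(
      executable->PopulateExecutionProfile(&hlo_profile, stream->parent()));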
diff --git a/tensorflow/compiler/xla/service/hlo_instruction.cc b/tensorflow/compiler/xla/service/hlo_instruction.cc
index e09899e48d..5107ac782d 100644
--- a/tensorflow/compiler/xla/service/hlo_instruction.cc
+++ b/tensorflow/compiler/xla/service/hlo_instruction.cc
@@ -1901,12 +1901,13 @@ std::vector<string> HloInstruction::ExtraAttributesToString() const {
if (has_sharding()) {
extra.push_back(StrCat("sharding=", sharding().ToString()));
}
- if (!control_successors_.empty()) {
- extra.push_back(StrCat(
- "control-successors=",
- Join(control_successors_, ", ", [](string* out, HloInstruction* succ) {
- StrAppend(out, succ->name());
- })));
+ if (!control_predecessors_.empty()) {
+ extra.push_back(StrCat("control-predecessors={",
+ Join(control_predecessors_, ", ",
+ [](string* out, HloInstruction* pre) {
+ StrAppend(out, pre->name());
+ }),
+ "}"));
}
return extra;
}
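
With this change the printed attribute moves from the predecessor to the successor side, so an instruction with control dependencies now renders along the lines of (illustrative names):

  add.2 = f32[] add(x.0, y.1), control-predecessors={mul.1}

i.e. the dependency is annotated on the instruction that must wait, rather than on the one it waits for.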
diff --git a/tensorflow/compiler/xla/service/hlo_runner.cc b/tensorflow/compiler/xla/service/hlo_runner.cc
index aaa4e3a2e3..f463e57d99 100644
--- a/tensorflow/compiler/xla/service/hlo_runner.cc
+++ b/tensorflow/compiler/xla/service/hlo_runner.cc
@@ -41,11 +41,21 @@ namespace se = ::perftools::gputools;
namespace xla {
/*static*/ StatusOr<std::unique_ptr<HloModule>>
-HloRunner::ReadModuleFromHloProtoFile(const char* filename,
+HloRunner::ReadModuleFromHloProtoFile(const std::string& filename,
const DebugOptions& debug_options) {
HloProto proto;
- TF_RETURN_IF_ERROR(tensorflow::ReadBinaryProto(tensorflow::Env::Default(),
- filename, &proto));
+
+ const Status s =
+ tensorflow::ReadBinaryProto(tensorflow::Env::Default(), filename, &proto);
+
+ if (!s.ok()) {
+ const Status s2 =
+ tensorflow::ReadTextProto(tensorflow::Env::Default(), filename, &proto);
+ if (!s2.ok()) {
+ return Status(s2.code(), s.error_message() + "\n" + s2.error_message());
+ }
+ }
+
TF_ASSIGN_OR_RETURN(
HloModuleConfig config,
HloModule::CreateModuleConfigFromProto(proto.hlo_module()));
@@ -56,7 +66,7 @@ HloRunner::ReadModuleFromHloProtoFile(const char* filename,
}
/*static*/ StatusOr<std::unique_ptr<HloModule>>
-HloRunner::ReadModuleFromHloTextDumpFile(const char* filename,
+HloRunner::ReadModuleFromHloTextDumpFile(const std::string& filename,
const DebugOptions& debug_options) {
string hlo_string;
TF_RETURN_IF_ERROR(tensorflow::ReadFileToString(tensorflow::Env::Default(),
@@ -66,6 +76,19 @@ HloRunner::ReadModuleFromHloTextDumpFile(const char* filename,
return tools::Parse(hlo_string, config);
}
+/*static*/ StatusOr<std::unique_ptr<HloModule>> HloRunner::ReadModule(
+ const std::string& filename, const DebugOptions& debug_options) {
+ auto module = HloRunner::ReadModuleFromHloProtoFile(filename, debug_options);
+ if (module.ok()) {
+ return module;
+ }
+ const std::string e = module.status().error_message();
+ module = HloRunner::ReadModuleFromHloTextDumpFile(filename, debug_options);
+ return module.ok() ? std::move(module)
+ : Status(module.status().code(),
+ e + "\n" + module.status().error_message());
+}
+
// Define this in .cc file to avoid having to include eigen or forward declare
// these types in the header.
struct HloRunner::EigenThreadPoolWrapper {
diff --git a/tensorflow/compiler/xla/service/hlo_runner.h b/tensorflow/compiler/xla/service/hlo_runner.h
index b0e2b980e2..a5732848c6 100644
--- a/tensorflow/compiler/xla/service/hlo_runner.h
+++ b/tensorflow/compiler/xla/service/hlo_runner.h
@@ -44,15 +44,23 @@ class HloRunner {
~HloRunner();
- // Reads the binary proto file in xla.HloProto format, creates and returns the
- // HloModule.
+ // Reads the proto file in xla.HloProto format, creates and returns the
+ // HloModule. Will try to parse the file first as a binary proto, then as a
+ // text proto if that fails.
static StatusOr<std::unique_ptr<HloModule>> ReadModuleFromHloProtoFile(
- const char* filename, const DebugOptions& debug_options);
+ const std::string& filename, const DebugOptions& debug_options);
// Reads the hlo text dump file in HloModule::ToString format, creates and
// returns the HloModule.
static StatusOr<std::unique_ptr<HloModule>> ReadModuleFromHloTextDumpFile(
- const char* filename, const DebugOptions& debug_options);
+ const std::string& filename, const DebugOptions& debug_options);
+
+ // Tries to parse the specified file first as a binary proto, then as a
+ // textual proto, then as textual IR, and gives up if all three fail.
+ // ReadModuleFromHloProtoFile or ReadModuleFromHloTextDumpFile should be used
+ // explicitly when you know the format; use this one when you don't.
+ static StatusOr<std::unique_ptr<HloModule>> ReadModule(
+ const std::string& filename, const DebugOptions& debug_options);
// Executes the given module with given literals as input and returns the
// result as a Literal. The LiteralPtr type accepts Literal* or
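
Typical use of the new convenience entry point, sketched (the path is illustrative):

  xla::DebugOptions debug_options;
  TF_ASSIGN_OR_RETURN(
      std::unique_ptr<xla::HloModule> module,
      xla::HloRunner::ReadModule("/tmp/module.hlo", debug_options));

Because each failed attempt's error message is concatenated onto the next, a file that parses under none of the formats reports why every attempt failed.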
diff --git a/tensorflow/compiler/xla/service/llvm_ir/llvm_util.cc b/tensorflow/compiler/xla/service/llvm_ir/llvm_util.cc
index 5dff4b5778..956c0d5f05 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/llvm_util.cc
+++ b/tensorflow/compiler/xla/service/llvm_ir/llvm_util.cc
@@ -555,8 +555,9 @@ int64 ByteSizeOf(const Shape& shape, const llvm::DataLayout& data_layout) {
llvm::FastMathFlags GetFastMathFlags(bool fast_math_enabled) {
llvm::FastMathFlags flags;
if (fast_math_enabled) {
- // UnsafeAlgebra implies NoInfs, NoNaNs, NoSignedZeros, and AllowReciprocal.
- flags.setUnsafeAlgebra();
+ // Fast implies AllowReassoc, NoInfs, NoNaNs, NoSignedZeros,
+ // AllowReciprocal, AllowContract, and ApproxFunc.
+ flags.setFast();
}
return flags;
}
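
Both this hunk and the matching one in llvm_ir_runtime.cc above track LLVM's FastMathFlags rework, in which setUnsafeAlgebra() was replaced by setFast(). Flag by flag, setFast() corresponds to the following sketch against the post-rework LLVM API:

  llvm::FastMathFlags flags;
  flags.setAllowReassoc(true);   // the old "unsafe algebra" reassociation bit
  flags.setNoNaNs(true);
  flags.setNoInfs(true);
  flags.setNoSignedZeros(true);
  flags.setAllowReciprocal(true);
  flags.setAllowContract(true);  // e.g. allow fusing mul+add into fma
  flags.setApproxFunc(true);     // allow approximations of libm functions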
diff --git a/tensorflow/compiler/xla/service/service.cc b/tensorflow/compiler/xla/service/service.cc
index bac33d8102..71afbee456 100644
--- a/tensorflow/compiler/xla/service/service.cc
+++ b/tensorflow/compiler/xla/service/service.cc
@@ -490,14 +490,20 @@ Service::ExecuteParallelAndRegisterResult(
std::vector<perftools::gputools::DeviceMemoryBase>>
arguments,
Backend* backend, tensorflow::gtl::ArraySlice<DeviceHandle> device_handles,
- tensorflow::gtl::ArraySlice<string> result_tags) {
+ tensorflow::gtl::ArraySlice<string> result_tags,
+ ExecutionProfile* profile) {
// Streams where the computation are launched, so we can wait on the streams
// to complete.
std::vector<Pool<se::Stream>::SmartPtr> streams;
+ std::vector<std::unique_ptr<perftools::gputools::Timer>> timers;
// Global data handles for the computation results, one for each computation.
std::vector<GlobalDataHandle> result_handles;
+ // Device ID to stream executor, populated only with devices that are being
+ // profiled.
+ std::map<int64, se::Stream*> index_to_profiled_streams;
+
TF_ASSIGN_OR_RETURN(DeviceAssignment device_assignment,
backend->computation_placer()->AssignDevices(
options_.number_of_replicas(), executables.size()));
@@ -510,6 +516,21 @@ Service::ExecuteParallelAndRegisterResult(
backend->BorrowStream(replicas[replica]));
streams.push_back(std::move(stream));
+ if (replica == 0 && profile != nullptr) {
+ timers.emplace_back(
+ new perftools::gputools::Timer(streams.back()->parent()));
+ streams.back()
+ ->InitTimer(timers.back().get())
+ .ThenStartTimer(timers.back().get());
+ CHECK(timers.front() != nullptr);
+ }
+
+ if (replica == 0 &&
+ executables[i]->module_config().debug_options().xla_hlo_profile() &&
+ executables[i]->hlo_profiling_enabled()) {
+ index_to_profiled_streams[i] = streams.back().get();
+ }
+
// Set up run options.
ExecutableRunOptions options;
options.set_stream(streams.back().get());
@@ -526,6 +547,10 @@ Service::ExecuteParallelAndRegisterResult(
perftools::gputools::DeviceMemoryBase result,
executables[i]->ExecuteAsyncOnStream(&run_options, arguments[i]));
+ if (replica == 0 && profile != nullptr) {
+ streams.back()->ThenStopTimer(timers.back().get());
+ }
+
// All replicas share the same device address for the result allocation,
// so only one of the replicas need to register the result handle.
if (replica == 0) {
@@ -543,6 +568,69 @@ Service::ExecuteParallelAndRegisterResult(
}
}
+ // For every stream that had profiling enabled, obtain and debug-dump the HLO
+ // profile.
+ for (auto& index_to_profiled_stream : index_to_profiled_streams) {
+ int64 device = index_to_profiled_stream.first;
+ se::Stream* stream = index_to_profiled_stream.second;
+ HloExecutionProfile hlo_profile;
+ TF_RETURN_IF_ERROR(executables[device]->PopulateExecutionProfile(
+ &hlo_profile, stream->parent()));
+
+ std::unordered_set<const xla::HloComputation*> profiled_computations =
+ hlo_profile.profiled_computations();
+ // To ensure we print the profiles in a stable order, iterate over the
+ // computations in post order.
+ auto& module = executables[device]->module();
+ std::list<xla::HloComputation*> all_computations =
+ module.MakeComputationPostOrder();
+ for (xla::HloComputation* computation : all_computations) {
+ if (profiled_computations.count(computation) > 0) {
+ string profile_string = hlo_profile.ToString(
+ *computation, streams[0]->parent()->GetDeviceDescription(),
+ executables[device]->CreateCostAnalysis().get());
+ if (!profile_string.empty()) {
+ LOG(INFO) << "HLO profile for execution on device " << device
+ << ":\n";
+ XLA_LOG_LINES(tensorflow::INFO, profile_string);
+ }
+ }
+ }
+ hlo_graph_dumper::MaybeDumpHloModule(module, "Service::Execute",
+ &hlo_profile);
+ }
+
+ if (profile != nullptr) {
+ CHECK(!timers.empty());
+ std::vector<uint64> timer_nanoseconds;
+ timer_nanoseconds.reserve(timers.size());
+ for (auto& timer : timers) {
+ timer_nanoseconds.push_back(timer->Nanoseconds());
+ }
+ uint64 nanoseconds =
+ *std::max_element(timer_nanoseconds.begin(), timer_nanoseconds.end());
+
+ // Merge in run-time profile information from execution_profile on the
+ // zeroth device.
+ profile->MergeFrom(executables[0]->execution_profile());
+
+ // Overall execution time (in nanoseconds) from the executor timer.
+ profile->set_compute_and_transfer_time_ns(nanoseconds);
+
+ // TODO(b/28123297): On GPU we end up including transfer time in
+ // the compute time this way. Instead, we should get the correct
+ // value by measuring it. Setting the field here at least lets
+ // benchmarks provide *some* value for GPU computations.
+ //
+ // TODO(b/28447609): The value in compute_and_transfer_time_ns is actually
+ // the compute time without the transfer time, so this way we get the
+ // correct compute time. We should instead have the correct value for
+ // compute_and_transfer_time and set compute_time to the compute time.
+ if (profile->compute_time_ns() == 0) {
+ profile->set_compute_time_ns(profile->compute_and_transfer_time_ns());
+ }
+ }
+
return result_handles;
}
@@ -715,14 +803,16 @@ tensorflow::Status Service::ExecuteParallel(const ExecuteParallelRequest* arg,
// Execute the generated executables in parallel and return the device
// handles for each computation's output.
+ ExecutionProfile profile;
TF_ASSIGN_OR_RETURN(
std::vector<GlobalDataHandle> outputs,
ExecuteParallelAndRegisterResult(executable_ptrs, all_arguments,
execute_backend_.get(), device_handles,
- computation_names));
+ computation_names, &profile));
for (const GlobalDataHandle& output : outputs) {
ExecuteResponse response;
*response.mutable_output() = output;
+ *response.mutable_profile() = profile;
*result->add_responses() = response;
}
@@ -1082,8 +1172,9 @@ tensorflow::Status Service::IsConstant(const IsConstantRequest* arg,
return InvalidArgument("computations may not be empty");
}
- TF_ASSIGN_OR_RETURN(bool is_constant,
- user_computation->IsConstant(arg->operand()));
+ TF_ASSIGN_OR_RETURN(
+ bool is_constant,
+ user_computation->IsConstant(arg->operand(), arg->num_parameters()));
result->set_is_constant(is_constant);
return tensorflow::Status::OK();
@@ -1101,8 +1192,9 @@ tensorflow::Status Service::ComputeConstant(const ComputeConstantRequest* arg,
return InvalidArgument("computations may not be empty");
}
- TF_ASSIGN_OR_RETURN(bool is_constant,
- user_computation->IsConstant(arg->operand()));
+ TF_ASSIGN_OR_RETURN(
+ bool is_constant,
+ user_computation->IsConstant(arg->operand(), arg->parameters_size()));
if (!is_constant) {
return InvalidArgument("Operand to ComputeConstant depends on parameter.");
}
@@ -1141,8 +1233,18 @@ tensorflow::Status Service::ComputeConstant(const ComputeConstantRequest* arg,
/*include_unreachable_instructions=*/
false));
+ std::vector<Literal> parameters(arg->parameters_size());
+ for (int64 i = 0; i < arg->parameters_size(); ++i) {
+ parameters[i] = Literal(arg->parameters(i));
+ }
+ std::vector<const Literal*> parameter_ptrs;
+ std::transform(parameters.begin(), parameters.end(),
+ std::back_inserter(parameter_ptrs),
+ [](const Literal& literal) { return &literal; });
+
HloEvaluator evaluator;
- TF_ASSIGN_OR_RETURN(auto result_literal, evaluator.Evaluate(*module, {}));
+ TF_ASSIGN_OR_RETURN(auto result_literal,
+ evaluator.Evaluate(*module, parameter_ptrs));
// Since the shape_with_output_layout option in ExecutionOption is
// non-effective to the Evaluator results, explicit relayout here.
if (arg->has_output_layout()) {
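
In the profile-merging block above, one timer exists per computation (started on replica 0), and the overall time recorded in the `ExecutionProfile` is the maximum across those timers, with a fallback when the backend reports no compute time. A rough Python sketch of that merge (a plain dict stands in for the `ExecutionProfile` proto; field names are kept only for orientation):

```python
def merge_parallel_profile(timer_nanoseconds, profile):
    """Sketch of the merge above: the slowest computation's timer becomes
    compute_and_transfer_time_ns; if the backend reported no compute time,
    fall back to the timer value (see the TODOs in the real code)."""
    profile['compute_and_transfer_time_ns'] = max(timer_nanoseconds)
    if profile.get('compute_time_ns', 0) == 0:
        profile['compute_time_ns'] = profile['compute_and_transfer_time_ns']
    return profile

print(merge_parallel_profile([120, 95, 130], {}))
# {'compute_and_transfer_time_ns': 130, 'compute_time_ns': 130}
```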
diff --git a/tensorflow/compiler/xla/service/service.h b/tensorflow/compiler/xla/service/service.h
index 2452259f73..6646be2e9a 100644
--- a/tensorflow/compiler/xla/service/service.h
+++ b/tensorflow/compiler/xla/service/service.h
@@ -327,7 +327,8 @@ class Service : public ServiceInterface {
arguments,
Backend* backend,
tensorflow::gtl::ArraySlice<DeviceHandle> device_handles,
- tensorflow::gtl::ArraySlice<string> result_tags);
+ tensorflow::gtl::ArraySlice<string> result_tags,
+ ExecutionProfile* profile);
// Convenience function for adding a function to a user computation.
template <typename RequestT, typename ResponseT>
diff --git a/tensorflow/compiler/xla/service/user_computation.cc b/tensorflow/compiler/xla/service/user_computation.cc
index 006c814996..e9d182509b 100644
--- a/tensorflow/compiler/xla/service/user_computation.cc
+++ b/tensorflow/compiler/xla/service/user_computation.cc
@@ -1482,14 +1482,15 @@ UserComputation::ComputeProgramShape(
namespace {
-// A visitor which checks whether an operation is a compile-time constant. That
-// is, the operation does not depend on any parameter instructions. The visitor
-// walks the computation starting at a given operation and sets is_constant to
-// false iff a parameter or RNG operation is encountered.
-void ConstantVisitor(const SessionComputation& session_computation,
- const ComputationDataHandle& handle,
- std::set<int64>* visited, bool* is_constant) {
- if (visited->count(handle.handle()) != 0 || !*is_constant) {
+// A visitor which checks whether an operation is purely functional, meaning
+// that it doesn't depend on any parameter with index >= num_parameters. The
+// visitor walks the computation starting at a given operation and sets
+// is_functional to false iff an RNG operation or a parameter with index >=
+// num_parameters is encountered.
+void PureFunctionalVisitor(const SessionComputation& session_computation,
+ const ComputationDataHandle& handle,
+ int64 num_parameters, std::set<int64>* visited,
+ bool* is_functional) {
+ if (visited->count(handle.handle()) != 0 || !*is_functional) {
return;
}
@@ -1497,7 +1498,7 @@ void ConstantVisitor(const SessionComputation& session_computation,
session_computation.requests().at(handle.handle());
switch (request.request().op_case()) {
case OpRequest::kRngRequest:
- *is_constant = false;
+ *is_functional = false;
break;
case OpRequest::kConstantRequest:
@@ -1506,41 +1507,43 @@ void ConstantVisitor(const SessionComputation& session_computation,
case OpRequest::kGetTupleElementRequest: {
const GetTupleElementRequest& get_tuple_element_request =
request.request().get_tuple_element_request();
- ConstantVisitor(session_computation, get_tuple_element_request.operand(),
- visited, is_constant);
+ PureFunctionalVisitor(session_computation,
+ get_tuple_element_request.operand(), num_parameters,
+ visited, is_functional);
break;
}
case OpRequest::kSliceRequest: {
const SliceRequest& slice_request = request.request().slice_request();
- ConstantVisitor(session_computation, slice_request.operand(), visited,
- is_constant);
+ PureFunctionalVisitor(session_computation, slice_request.operand(),
+ num_parameters, visited, is_functional);
break;
}
case OpRequest::kDynamicSliceRequest: {
const DynamicSliceRequest& dynamic_slice_request =
request.request().dynamic_slice_request();
- ConstantVisitor(session_computation, dynamic_slice_request.operand(),
- visited, is_constant);
- ConstantVisitor(session_computation,
- dynamic_slice_request.start_indices(), visited,
- is_constant);
+ PureFunctionalVisitor(session_computation,
+ dynamic_slice_request.operand(), num_parameters,
+ visited, is_functional);
+ PureFunctionalVisitor(session_computation,
+ dynamic_slice_request.start_indices(),
+ num_parameters, visited, is_functional);
break;
}
case OpRequest::kDynamicUpdateSliceRequest: {
const DynamicUpdateSliceRequest& dynamic_update_slice_request =
request.request().dynamic_update_slice_request();
- ConstantVisitor(session_computation,
- dynamic_update_slice_request.operand(), visited,
- is_constant);
- ConstantVisitor(session_computation,
- dynamic_update_slice_request.update(), visited,
- is_constant);
- ConstantVisitor(session_computation,
- dynamic_update_slice_request.start_indices(), visited,
- is_constant);
+ PureFunctionalVisitor(session_computation,
+ dynamic_update_slice_request.operand(),
+ num_parameters, visited, is_functional);
+ PureFunctionalVisitor(session_computation,
+ dynamic_update_slice_request.update(),
+ num_parameters, visited, is_functional);
+ PureFunctionalVisitor(session_computation,
+ dynamic_update_slice_request.start_indices(),
+ num_parameters, visited, is_functional);
break;
}
@@ -1549,7 +1552,8 @@ void ConstantVisitor(const SessionComputation& session_computation,
request.request().concatenate_request();
for (const ComputationDataHandle& handle :
concatenate_request.operands()) {
- ConstantVisitor(session_computation, handle, visited, is_constant);
+ PureFunctionalVisitor(session_computation, handle, num_parameters,
+ visited, is_functional);
}
break;
}
@@ -1557,61 +1561,63 @@ void ConstantVisitor(const SessionComputation& session_computation,
case OpRequest::kConvolveRequest: {
const ConvolveRequest& convolve_request =
request.request().convolve_request();
- ConstantVisitor(session_computation, convolve_request.lhs(), visited,
- is_constant);
- ConstantVisitor(session_computation, convolve_request.rhs(), visited,
- is_constant);
+ PureFunctionalVisitor(session_computation, convolve_request.lhs(),
+ num_parameters, visited, is_functional);
+ PureFunctionalVisitor(session_computation, convolve_request.rhs(),
+ num_parameters, visited, is_functional);
break;
}
case OpRequest::kCrossReplicaSumRequest: {
// TODO(b/33009255): Implement constant folding for cross replica sum.
- *is_constant = false;
+ *is_functional = false;
break;
}
case OpRequest::kInfeedRequest: {
- *is_constant = false;
+ *is_functional = false;
break;
}
case OpRequest::kOutfeedRequest: {
- *is_constant = false;
+ *is_functional = false;
break;
}
case OpRequest::kCallRequest: {
const CallRequest& call_request = request.request().call_request();
for (const ComputationDataHandle& handle : call_request.operands()) {
- ConstantVisitor(session_computation, handle, visited, is_constant);
+ PureFunctionalVisitor(session_computation, handle, num_parameters,
+ visited, is_functional);
}
// TODO(b/32495713): We aren't checking the to_apply computation itself,
// so we conservatively say that computations containing the Call op
- // cannot be constant. We cannot set is_constant=false in other similar
+ // cannot be constant. We cannot set is_functional=false in other similar
// cases since we're already relying on IsConstant to return true.
- *is_constant = false;
+ *is_functional = false;
break;
}
case OpRequest::kCustomCallRequest: {
- *is_constant = false;
+ *is_functional = false;
break;
}
case OpRequest::kSendRequest: {
- *is_constant = false;
+ *is_functional = false;
break;
}
case OpRequest::kRecvRequest: {
- *is_constant = false;
+ *is_functional = false;
break;
}
case OpRequest::kMapRequest: {
const MapRequest& map_request = request.request().map_request();
for (const ComputationDataHandle& handle : map_request.operands()) {
- ConstantVisitor(session_computation, handle, visited, is_constant);
+ PureFunctionalVisitor(session_computation, handle, num_parameters,
+ visited, is_functional);
}
// TODO(b/32495713): We aren't checking the to_apply computation itself.
break;
@@ -1619,10 +1625,10 @@ void ConstantVisitor(const SessionComputation& session_computation,
case OpRequest::kReduceRequest: {
const ReduceRequest& reduce_request = request.request().reduce_request();
- ConstantVisitor(session_computation, reduce_request.operand(), visited,
- is_constant);
- ConstantVisitor(session_computation, reduce_request.init_value(), visited,
- is_constant);
+ PureFunctionalVisitor(session_computation, reduce_request.operand(),
+ num_parameters, visited, is_functional);
+ PureFunctionalVisitor(session_computation, reduce_request.init_value(),
+ num_parameters, visited, is_functional);
// TODO(b/32495713): We aren't checking the to_apply computation itself.
break;
}
@@ -1630,10 +1636,12 @@ void ConstantVisitor(const SessionComputation& session_computation,
case OpRequest::kReduceWindowRequest: {
const ReduceWindowRequest& reduce_window_request =
request.request().reduce_window_request();
- ConstantVisitor(session_computation, reduce_window_request.operand(),
- visited, is_constant);
- ConstantVisitor(session_computation, reduce_window_request.init_value(),
- visited, is_constant);
+ PureFunctionalVisitor(session_computation,
+ reduce_window_request.operand(), num_parameters,
+ visited, is_functional);
+ PureFunctionalVisitor(session_computation,
+ reduce_window_request.init_value(), num_parameters,
+ visited, is_functional);
// TODO(b/32495713): We aren't checking the to_apply computation itself.
break;
}
@@ -1641,13 +1649,15 @@ void ConstantVisitor(const SessionComputation& session_computation,
case OpRequest::kSelectAndScatterRequest: {
const SelectAndScatterRequest& select_and_scatter_request =
request.request().select_and_scatter_request();
- ConstantVisitor(session_computation, select_and_scatter_request.operand(),
- visited, is_constant);
- ConstantVisitor(session_computation, select_and_scatter_request.source(),
- visited, is_constant);
- ConstantVisitor(session_computation,
- select_and_scatter_request.init_value(), visited,
- is_constant);
+ PureFunctionalVisitor(session_computation,
+ select_and_scatter_request.operand(),
+ num_parameters, visited, is_functional);
+ PureFunctionalVisitor(session_computation,
+ select_and_scatter_request.source(), num_parameters,
+ visited, is_functional);
+ PureFunctionalVisitor(session_computation,
+ select_and_scatter_request.init_value(),
+ num_parameters, visited, is_functional);
// TODO(b/32495713): We aren't checking the select and scatter
// computations themselves.
break;
@@ -1656,76 +1666,80 @@ void ConstantVisitor(const SessionComputation& session_computation,
case OpRequest::kBroadcastRequest: {
const BroadcastRequest& broadcast_request =
request.request().broadcast_request();
- ConstantVisitor(session_computation, broadcast_request.operand(), visited,
- is_constant);
+ PureFunctionalVisitor(session_computation, broadcast_request.operand(),
+ num_parameters, visited, is_functional);
break;
}
case OpRequest::kReshapeRequest: {
const ReshapeRequest& reshape_request =
request.request().reshape_request();
- ConstantVisitor(session_computation, reshape_request.operand(), visited,
- is_constant);
+ PureFunctionalVisitor(session_computation, reshape_request.operand(),
+ num_parameters, visited, is_functional);
break;
}
case OpRequest::kReverseRequest: {
const ReverseRequest& reverse_request =
request.request().reverse_request();
- ConstantVisitor(session_computation, reverse_request.operand(), visited,
- is_constant);
+ PureFunctionalVisitor(session_computation, reverse_request.operand(),
+ num_parameters, visited, is_functional);
break;
}
case OpRequest::kPadRequest: {
const PadRequest& pad_request = request.request().pad_request();
- ConstantVisitor(session_computation, pad_request.operand(), visited,
- is_constant);
- ConstantVisitor(session_computation, pad_request.padding_value(), visited,
- is_constant);
+ PureFunctionalVisitor(session_computation, pad_request.operand(),
+ num_parameters, visited, is_functional);
+ PureFunctionalVisitor(session_computation, pad_request.padding_value(),
+ num_parameters, visited, is_functional);
break;
}
case OpRequest::kParameterRequest: {
- *is_constant = false;
+ const ParameterRequest& parameter_request =
+ request.request().parameter_request();
+ if (parameter_request.parameter() >= num_parameters) {
+ *is_functional = false;
+ }
break;
}
case OpRequest::kConvertRequest: {
const ConvertRequest& convert_request =
request.request().convert_request();
- ConstantVisitor(session_computation, convert_request.operand(), visited,
- is_constant);
+ PureFunctionalVisitor(session_computation, convert_request.operand(),
+ num_parameters, visited, is_functional);
break;
}
case OpRequest::kWhileRequest: {
const WhileRequest& while_request = request.request().while_request();
- ConstantVisitor(session_computation, while_request.init(), visited,
- is_constant);
+ PureFunctionalVisitor(session_computation, while_request.init(),
+ num_parameters, visited, is_functional);
// TODO(b/32495713): We aren't checking the condition and body
// computations themselves.
- *is_constant = false;
+ *is_functional = false;
break;
}
case OpRequest::kTernaryOpRequest: {
const TernaryOpRequest& ternary_op_request =
request.request().ternary_op_request();
- ConstantVisitor(session_computation, ternary_op_request.lhs(), visited,
- is_constant);
- ConstantVisitor(session_computation, ternary_op_request.rhs(), visited,
- is_constant);
- ConstantVisitor(session_computation, ternary_op_request.ehs(), visited,
- is_constant);
+ PureFunctionalVisitor(session_computation, ternary_op_request.lhs(),
+ num_parameters, visited, is_functional);
+ PureFunctionalVisitor(session_computation, ternary_op_request.rhs(),
+ num_parameters, visited, is_functional);
+ PureFunctionalVisitor(session_computation, ternary_op_request.ehs(),
+ num_parameters, visited, is_functional);
break;
}
case OpRequest::kTransposeRequest: {
const TransposeRequest& transpose_request =
request.request().transpose_request();
- ConstantVisitor(session_computation, transpose_request.operand(), visited,
- is_constant);
+ PureFunctionalVisitor(session_computation, transpose_request.operand(),
+ num_parameters, visited, is_functional);
break;
}
@@ -1734,7 +1748,8 @@ void ConstantVisitor(const SessionComputation& session_computation,
request.request().variadic_op_request();
for (const ComputationDataHandle& handle :
variadic_op_request.operands()) {
- ConstantVisitor(session_computation, handle, visited, is_constant);
+ PureFunctionalVisitor(session_computation, handle, num_parameters,
+ visited, is_functional);
}
break;
}
@@ -1742,67 +1757,74 @@ void ConstantVisitor(const SessionComputation& session_computation,
case OpRequest::kUnaryOpRequest: {
const UnaryOpRequest& unary_op_request =
request.request().unary_op_request();
- ConstantVisitor(session_computation, unary_op_request.operand(), visited,
- is_constant);
+ PureFunctionalVisitor(session_computation, unary_op_request.operand(),
+ num_parameters, visited, is_functional);
break;
}
case OpRequest::kBatchNormTrainingRequest: {
const BatchNormTrainingRequest& batch_norm_training_request =
request.request().batch_norm_training_request();
- ConstantVisitor(session_computation,
- batch_norm_training_request.operand(), visited,
- is_constant);
- ConstantVisitor(session_computation, batch_norm_training_request.scale(),
- visited, is_constant);
- ConstantVisitor(session_computation, batch_norm_training_request.offset(),
- visited, is_constant);
+ PureFunctionalVisitor(session_computation,
+ batch_norm_training_request.operand(),
+ num_parameters, visited, is_functional);
+ PureFunctionalVisitor(session_computation,
+ batch_norm_training_request.scale(), num_parameters,
+ visited, is_functional);
+ PureFunctionalVisitor(session_computation,
+ batch_norm_training_request.offset(),
+ num_parameters, visited, is_functional);
break;
}
case OpRequest::kBatchNormInferenceRequest: {
const BatchNormInferenceRequest& batch_norm_inference_request =
request.request().batch_norm_inference_request();
- ConstantVisitor(session_computation,
- batch_norm_inference_request.operand(), visited,
- is_constant);
- ConstantVisitor(session_computation, batch_norm_inference_request.scale(),
- visited, is_constant);
- ConstantVisitor(session_computation,
- batch_norm_inference_request.offset(), visited,
- is_constant);
- ConstantVisitor(session_computation, batch_norm_inference_request.mean(),
- visited, is_constant);
- ConstantVisitor(session_computation,
- batch_norm_inference_request.variance(), visited,
- is_constant);
+ PureFunctionalVisitor(session_computation,
+ batch_norm_inference_request.operand(),
+ num_parameters, visited, is_functional);
+ PureFunctionalVisitor(session_computation,
+ batch_norm_inference_request.scale(),
+ num_parameters, visited, is_functional);
+ PureFunctionalVisitor(session_computation,
+ batch_norm_inference_request.offset(),
+ num_parameters, visited, is_functional);
+ PureFunctionalVisitor(session_computation,
+ batch_norm_inference_request.mean(), num_parameters,
+ visited, is_functional);
+ PureFunctionalVisitor(session_computation,
+ batch_norm_inference_request.variance(),
+ num_parameters, visited, is_functional);
break;
}
case OpRequest::kBatchNormGradRequest: {
const BatchNormGradRequest& batch_norm_grad_request =
request.request().batch_norm_grad_request();
- ConstantVisitor(session_computation, batch_norm_grad_request.operand(),
- visited, is_constant);
- ConstantVisitor(session_computation, batch_norm_grad_request.scale(),
- visited, is_constant);
- ConstantVisitor(session_computation, batch_norm_grad_request.mean(),
- visited, is_constant);
- ConstantVisitor(session_computation, batch_norm_grad_request.variance(),
- visited, is_constant);
- ConstantVisitor(session_computation,
- batch_norm_grad_request.grad_output(), visited,
- is_constant);
+ PureFunctionalVisitor(session_computation,
+ batch_norm_grad_request.operand(), num_parameters,
+ visited, is_functional);
+ PureFunctionalVisitor(session_computation,
+ batch_norm_grad_request.scale(), num_parameters,
+ visited, is_functional);
+ PureFunctionalVisitor(session_computation, batch_norm_grad_request.mean(),
+ num_parameters, visited, is_functional);
+ PureFunctionalVisitor(session_computation,
+ batch_norm_grad_request.variance(), num_parameters,
+ visited, is_functional);
+ PureFunctionalVisitor(session_computation,
+ batch_norm_grad_request.grad_output(),
+ num_parameters, visited, is_functional);
break;
}
case OpRequest::kBinaryOpRequest: {
const BinaryOpRequest& binary_op_request =
request.request().binary_op_request();
- ConstantVisitor(session_computation, binary_op_request.lhs(), visited,
- is_constant);
- ConstantVisitor(session_computation, binary_op_request.rhs(), visited,
- is_constant);
+ PureFunctionalVisitor(session_computation, binary_op_request.lhs(),
+ num_parameters, visited, is_functional);
+ PureFunctionalVisitor(session_computation, binary_op_request.rhs(),
+ num_parameters, visited, is_functional);
break;
}
@@ -1817,8 +1839,8 @@ void ConstantVisitor(const SessionComputation& session_computation,
} // namespace
-StatusOr<bool> UserComputation::IsConstant(
- const ComputationDataHandle& handle) {
+StatusOr<bool> UserComputation::IsConstant(const ComputationDataHandle& handle,
+ int64 num_parameters) {
tensorflow::mutex_lock lock(mutex_);
// Verify that the handle is valid.
@@ -1829,7 +1851,8 @@ StatusOr<bool> UserComputation::IsConstant(
bool is_constant = true;
std::set<int64> visited;
- ConstantVisitor(session_computation_, handle, &visited, &is_constant);
+ PureFunctionalVisitor(session_computation_, handle, num_parameters, &visited,
+ &is_constant);
return is_constant;
}
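
The net effect of the rename is a generalized contract: an operation still counts as constant as long as every parameter it reaches has an index below `num_parameters`. A self-contained Python sketch of the same traversal over a toy expression DAG (the `Node` type and op names are illustrative, not XLA types):

```python
from collections import namedtuple

# Toy expression node: `op` is one of 'constant', 'parameter', 'rng', 'add';
# `index` is only meaningful for parameters.
Node = namedtuple('Node', ['op', 'index', 'operands'])


def is_pure_functional(node, num_parameters, visited=None):
    """Mirrors PureFunctionalVisitor: False iff the subgraph rooted at
    `node` reaches an RNG op or a parameter with index >= num_parameters."""
    if visited is None:
        visited = set()
    if id(node) in visited:
        return True
    visited.add(id(node))
    if node.op == 'rng':
        return False
    if node.op == 'parameter':
        return node.index < num_parameters
    return all(is_pure_functional(operand, num_parameters, visited)
               for operand in node.operands)


param = Node('parameter', 0, [])
expr = Node('add', None, [param, Node('constant', None, [])])
assert is_pure_functional(expr, num_parameters=1)      # parameter 0 supplied
assert not is_pure_functional(expr, num_parameters=0)  # parameter 0 missing
```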
diff --git a/tensorflow/compiler/xla/service/user_computation.h b/tensorflow/compiler/xla/service/user_computation.h
index dabf68e298..ac879ce55a 100644
--- a/tensorflow/compiler/xla/service/user_computation.h
+++ b/tensorflow/compiler/xla/service/user_computation.h
@@ -250,9 +250,11 @@ class UserComputation {
StatusOr<std::shared_ptr<const ProgramShape>> ComputeProgramShape(
VersionedComputationHandle::Version version) const;
- // Returns true if the given data handle does not depend on any
- // parameters. That is, the value can be computed at compile time.
- StatusOr<bool> IsConstant(const ComputationDataHandle& handle);
+ // Returns true if the given data handle does not depend on any parameter
+ // with index >= num_parameters. That is, the value can be computed at
+ // compile time if we know the first num_parameters arguments.
+ StatusOr<bool> IsConstant(const ComputationDataHandle& handle,
+ int64 num_parameters);
// Returns the output shape of the operation indicated by the given handle.
StatusOr<Shape> GetShape(const ComputationDataHandle& handle);
diff --git a/tensorflow/compiler/xla/tests/compute_constant_test.cc b/tensorflow/compiler/xla/tests/compute_constant_test.cc
index b2e9743af7..d423c78476 100644
--- a/tensorflow/compiler/xla/tests/compute_constant_test.cc
+++ b/tensorflow/compiler/xla/tests/compute_constant_test.cc
@@ -71,24 +71,27 @@ class ComputeConstantTest : public ::testing::Test {
StatusOr<std::unique_ptr<Literal>> ComputeConstantLiteral(
Client* client, const ComputationDataHandle& operand,
- ComputationBuilder* builder, Layout* output_layout = nullptr) {
- TF_ASSIGN_OR_RETURN(auto computed,
- builder->ComputeConstant(operand, output_layout));
+ ComputationBuilder* builder, Layout* output_layout = nullptr,
+ tensorflow::gtl::ArraySlice<Literal> parameters = {}) {
+ TF_ASSIGN_OR_RETURN(auto computed, builder->ComputeConstant(
+ operand, output_layout, parameters));
return std::move(computed);
}
template <class Scalar>
- StatusOr<Scalar> ComputeConstantScalar(Client* client,
- const ComputationDataHandle& operand,
- ComputationBuilder* builder) {
- TF_ASSIGN_OR_RETURN(auto literal,
- ComputeConstantLiteral(client, operand, builder));
+ StatusOr<Scalar> ComputeConstantScalar(
+ Client* client, const ComputationDataHandle& operand,
+ ComputationBuilder* builder,
+ tensorflow::gtl::ArraySlice<Literal> parameters = {}) {
+ TF_ASSIGN_OR_RETURN(
+ auto literal,
+ ComputeConstantLiteral(client, operand, builder, nullptr, parameters));
return literal->Get<Scalar>({});
}
bool IsConstant(const ComputationDataHandle& operand,
- ComputationBuilder* builder) {
- StatusOr<bool> result = builder->IsConstant(operand);
+ ComputationBuilder* builder, int64 num_parameters = 0) {
+ StatusOr<bool> result = builder->IsConstant(operand, num_parameters);
EXPECT_TRUE(result.ok()) << result.status();
return result.ok() ? result.ValueOrDie() : false;
}
@@ -138,7 +141,25 @@ TEST_F(ComputeConstantTest, ScalarRng) {
}
}
-TEST_F(ComputeConstantTest, DirectParam) {
+TEST_F(ComputeConstantTest, Param) {
+ for (ClientType client_type : client_types) {
+ Client* client = ClientOrDie(platform_, client_type);
+ ComputationBuilder b(client, TestName());
+ auto param = b.Parameter(0, ShapeUtil::MakeShape(F32, {}), "lhs");
+ auto computation = b.Add(param, b.ConstantR0<float>(1.5f));
+
+ std::vector<Literal> arguments;
+ arguments.emplace_back(*Literal::CreateR0(42.5f));
+ EXPECT_TRUE(IsConstant(computation, &b, arguments.size()));
+
+ auto value =
+ ComputeConstantScalar<float>(client, computation, &b, arguments);
+ ASSERT_TRUE(value.ok()) << value.status();
+ EXPECT_EQ(value.ValueOrDie(), 44.0f);
+ }
+}
+
+TEST_F(ComputeConstantTest, DirectParamMissing) {
for (ClientType client_type : client_types) {
Client* client = ClientOrDie(platform_, client_type);
ComputationBuilder b(client, TestName());
@@ -152,7 +173,7 @@ TEST_F(ComputeConstantTest, DirectParam) {
}
}
-TEST_F(ComputeConstantTest, IndirectParam) {
+TEST_F(ComputeConstantTest, IndirectParamMissing) {
for (ClientType client_type : client_types) {
Client* client = ClientOrDie(platform_, client_type);
ComputationBuilder b(client, TestName());
diff --git a/tensorflow/compiler/xla/tests/while_test.cc b/tensorflow/compiler/xla/tests/while_test.cc
index 71a1b0abee..3b29a2eb9e 100644
--- a/tensorflow/compiler/xla/tests/while_test.cc
+++ b/tensorflow/compiler/xla/tests/while_test.cc
@@ -357,6 +357,111 @@ TEST_F(WhileTest, WhileWithVectorResultIntoTuple) {
ComputeAndCompareTuple(&builder, *expected, {}, ErrorSpec(0.0001));
}
+// TODO(b/63003356): 11-06-2017: fails on all back-ends with incorrect result.
+TEST_F(WhileTest, DISABLED_WhileWithPermutationAndTupleResult) {
+ std::vector<Shape> shape_elements = {
+ ShapeUtil::MakeShape(S32, {}), ShapeUtil::MakeShape(F32, {3}),
+ ShapeUtil::MakeShape(F32, {3}), ShapeUtil::MakeShape(F32, {3})};
+ Shape result_shape = ShapeUtil::MakeTupleShape(shape_elements);
+
+ // Create a computation for the condition.
+ // Repeat for N iterations.
+ const int N = 2;
+ Computation condition;
+ {
+ ComputationBuilder builder(client_, "condition");
+ auto prev = builder.Parameter(0, result_shape, "prev");
+ auto iteration = builder.GetTupleElement(prev, 0);
+ builder.Gt(builder.ConstantR0<int32>(N), iteration);
+ condition = builder.Build().ConsumeValueOrDie();
+ }
+
+ // Create a computation for the body.
+ // Add 1 to the iteration variable and permute the weights.
+ Computation body;
+ {
+ ComputationBuilder builder(client_, "body");
+ auto prev = builder.Parameter(0, result_shape, "prev");
+ auto iteration = builder.GetTupleElement(prev, 0);
+ auto w1 = builder.GetTupleElement(prev, 1);
+ auto w2 = builder.GetTupleElement(prev, 2);
+ auto w3 = builder.GetTupleElement(prev, 3);
+ auto result = builder.Tuple(
+ {builder.Add(iteration, builder.ConstantR0<int32>(1)), w3, w1, w2});
+ body = builder.Build().ConsumeValueOrDie();
+ }
+
+ // Create a While node with computations for the condition and the body.
+ ComputationBuilder builder(client_, "while");
+ auto init = builder.Tuple(
+ {builder.ConstantR0<int32>(0), builder.ConstantR1<float>(3, 1.f),
+ builder.ConstantR1<float>(3, 2.f), builder.ConstantR1<float>(3, 3.f)});
+ auto result = builder.While(condition, body, init);
+ VLOG(2) << "result = "
+ << ShapeUtil::HumanString(
+ *builder.GetShape(result).ConsumeValueOrDie());
+
+ auto expected_counter = Literal::CreateR0<int32>(N);
+ auto expected_w1 = Literal::CreateR1<float>({1.0f, 1.0f, 1.0f});
+ auto expected_w2 = Literal::CreateR1<float>({2.0f, 2.0f, 2.0f});
+ auto expected_w3 = Literal::CreateR1<float>({3.0f, 3.0f, 3.0f});
+ auto expected = Literal::MakeTuple({expected_counter.get(), expected_w2.get(),
+ expected_w3.get(), expected_w1.get()});
+ VLOG(2) << "expected = " << ShapeUtil::HumanString(expected->shape());
+ ComputeAndCompareTuple(&builder, *expected, {}, ErrorSpec(0.0001));
+}
+
+// TODO(b/63003356): 11-06-2017: fails on all back-ends with incorrect result.
+TEST_F(WhileTest, DISABLED_WhileWithPermutationAndVectorResult) {
+ std::vector<Shape> shape_elements = {
+ ShapeUtil::MakeShape(S32, {}), ShapeUtil::MakeShape(F32, {3}),
+ ShapeUtil::MakeShape(F32, {3}), ShapeUtil::MakeShape(F32, {3})};
+ Shape result_shape = ShapeUtil::MakeTupleShape(shape_elements);
+
+ // Create a computation for the condition.
+ // Repeat for N iterations.
+ const int N = 2;
+ Computation condition;
+ {
+ ComputationBuilder builder(client_, "condition");
+ auto prev = builder.Parameter(0, result_shape, "prev");
+ auto iteration = builder.GetTupleElement(prev, 0);
+ builder.Gt(builder.ConstantR0<int32>(N), iteration);
+ condition = builder.Build().ConsumeValueOrDie();
+ }
+
+ // Create a computation for the body.
+ // Add 1 to the iteration variable and permute the weights.
+ Computation body;
+ {
+ ComputationBuilder builder(client_, "body");
+ auto prev = builder.Parameter(0, result_shape, "prev");
+ auto iteration = builder.GetTupleElement(prev, 0);
+ auto w1 = builder.GetTupleElement(prev, 1);
+ auto w2 = builder.GetTupleElement(prev, 2);
+ auto w3 = builder.GetTupleElement(prev, 3);
+ auto result = builder.Tuple(
+ {builder.Add(iteration, builder.ConstantR0<int32>(1)), w3, w1, w2});
+ body = builder.Build().ConsumeValueOrDie();
+ }
+
+ // Create a While node with computations for the condition and the body.
+ ComputationBuilder builder(client_, "while");
+ auto init = builder.Tuple(
+ {builder.ConstantR0<int32>(0), builder.ConstantR1<float>(3, 1.f),
+ builder.ConstantR1<float>(3, 2.f), builder.ConstantR1<float>(3, 3.f)});
+ auto xla_while = builder.While(condition, body, init);
+
+ auto add12 = builder.Add(builder.GetTupleElement(xla_while, 1),
+ builder.GetTupleElement(xla_while, 2));
+ auto result = builder.Add(add12, builder.GetTupleElement(xla_while, 3));
+ VLOG(2) << "result = "
+ << ShapeUtil::HumanString(
+ *builder.GetShape(result).ConsumeValueOrDie());
+ std::vector<float> expected = {6.f, 6.f, 6.f};
+ ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001));
+}
+
// Tests a while node when the result type T is a Tuple.
//
// tuple<int32, vector<float>> result(0, vector<float>(10, 0.0f));
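
Both disabled tests build the same loop: each iteration increments the counter and rotates the weight vectors `(w1, w2, w3) -> (w3, w1, w2)`. A small Python model of the body (not TF code) shows why the expected result after `N = 2` iterations is `(counter, w2, w3, w1)`:

```python
def rotate_once(state):
    """Model of the loop body above: bump the counter and rotate the
    weight vectors (w1, w2, w3) -> (w3, w1, w2)."""
    i, w1, w2, w3 = state
    return (i + 1, w3, w1, w2)

state = (0, [1.0] * 3, [2.0] * 3, [3.0] * 3)
N = 2
while state[0] < N:  # mirrors the Gt(N, iteration) condition
    state = rotate_once(state)

# After two rotations the weights are (w2, w3, w1), matching the expected
# tuple (counter=2, {2,2,2}, {3,3,3}, {1,1,1}) in the test above.
assert state == (2, [2.0] * 3, [3.0] * 3, [1.0] * 3)
```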
diff --git a/tensorflow/compiler/xla/tools/parser/hlo_parser.cc b/tensorflow/compiler/xla/tools/parser/hlo_parser.cc
index 5de73ee866..6c2e37e3b5 100644
--- a/tensorflow/compiler/xla/tools/parser/hlo_parser.cc
+++ b/tensorflow/compiler/xla/tools/parser/hlo_parser.cc
@@ -58,6 +58,7 @@ class HloParser {
string* root_name);
bool ParseInstruction(HloComputation::Builder* builder, string* root_name);
bool ParseSharding(HloInstruction* instruction);
+ bool ParseControlPredecessors(HloInstruction* instruction);
bool ParseLiteral(std::unique_ptr<Literal>* literal, const Shape& shape);
bool ParseTupleLiteral(std::unique_ptr<Literal>* literal, const Shape& shape);
bool ParseNonTupleLiteral(std::unique_ptr<Literal>* literal,
@@ -436,10 +437,35 @@ bool HloParser::ParseInstruction(HloComputation::Builder* builder,
return TokenError(StrCat("parsing not yet implemented for op: ",
HloOpcodeString(opcode)));
}
- // Parse "sharding=".
- if (lexer_.GetKind() == TokKind::kComma) {
- if (!ParseSharding(instruction)) {
- return false;
+
+ bool has_sharding = false;
+ bool has_control = false;
+ while (EatIfPresent(TokKind::kComma)) {
+ string attribute_name;
+ if (!ParseAttributeName(&attribute_name)) {
+ return TokenError("expects ', sharding=' or ', control-predecessors='");
+ }
+
+ if (attribute_name == "sharding") {
+ // Parse "sharding=".
+ if (has_sharding) {
+ return TokenError("expects at most 1 'sharding='");
+ }
+ has_sharding = true;
+ if (!ParseSharding(instruction)) {
+ return false;
+ }
+ } else if (attribute_name == "control-predecessors") {
+ // Parse "control-predecessors"
+ if (has_control) {
+ return TokenError("expects at most 1 'control-predecessors='");
+ }
+ has_control = true;
+ if (!ParseControlPredecessors(instruction)) {
+ return false;
+ }
+ } else {
+ return TokenError(StrCat("unexpected attribute: ", attribute_name));
}
}
@@ -449,15 +475,6 @@ bool HloParser::ParseInstruction(HloComputation::Builder* builder,
// ::= '{' 'replicated'? 'maximal'? ('device=' int)? shape? ('devices=' ('['
// dims ']')* device_list)? '}' dims ::= int_list device_list ::= int_list
bool HloParser::ParseSharding(HloInstruction* instruction) {
- if (!ParseToken(TokKind::kComma,
- "expects ',' in front of an extra attribute")) {
- return false;
- }
- string attribute_name;
- if (!ParseAttributeName(&attribute_name) || attribute_name != "sharding") {
- return TokenError("expects attribute name: sharding");
- }
-
if (!ParseToken(TokKind::kLbrace,
"expected '{' to start sharding attribute")) {
return false;
@@ -577,6 +594,34 @@ bool HloParser::ParseSharding(HloInstruction* instruction) {
return true;
}
+// '{' name+ '}'
+bool HloParser::ParseControlPredecessors(HloInstruction* instruction) {
+ if (!ParseToken(TokKind::kLbrace,
+ "expects '{' at the beginning of control predecessors")) {
+ return false;
+ }
+ do {
+ string name;
+ if (!ParseName(&name)) {
+ return TokenError("expects a control predecessor");
+ }
+ HloInstruction* pre =
+ tensorflow::gtl::FindPtrOrNull(instruction_pool_, name);
+ if (!pre) {
+ return TokenError(
+ StrCat("control predecessor ", name, " is not defined: "));
+ }
+ Status status = pre->AddControlDependencyTo(instruction);
+ if (!status.ok()) {
+ return TokenError(StrCat("error adding control dependency for: ", name,
+ " status: ", status.ToString()));
+ }
+ } while (EatIfPresent(TokKind::kComma));
+
+ return ParseToken(TokKind::kRbrace,
+ "expects '}' at the end of control predecessors");
+}
+
bool HloParser::SetValueInLiteral(int64 value, int64 linear_index,
Literal* literal) {
const Shape& shape = literal->shape();
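
The restructured loop above accepts `sharding` and `control-predecessors` after the operands in either order, each at most once. A hedged Python sketch of the same loop shape over a toy token list (the tokenizer and payload handling are hypothetical, not the HLO lexer):

```python
def parse_extra_attributes(tokens):
    """Accept zero or more ', name=payload' attributes, each at most once."""
    allowed = ('sharding', 'control-predecessors')
    attrs = {}
    while tokens and tokens[0] == ',':
        tokens.pop(0)                # eat ','
        name = tokens.pop(0)         # attribute name
        if name not in allowed:
            raise ValueError('unexpected attribute: ' + name)
        if name in attrs:
            raise ValueError("expects at most 1 '%s='" % name)
        if tokens.pop(0) != '=':
            raise ValueError("expects '=' after " + name)
        attrs[name] = tokens.pop(0)  # attribute payload
    return attrs

print(parse_extra_attributes([',', 'control-predecessors', '=', '{%recv}']))
# {'control-predecessors': '{%recv}'}
```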
diff --git a/tensorflow/compiler/xla/tools/parser/hlo_parser_test.cc b/tensorflow/compiler/xla/tools/parser/hlo_parser_test.cc
index e065af7da6..359256f064 100644
--- a/tensorflow/compiler/xla/tools/parser/hlo_parser_test.cc
+++ b/tensorflow/compiler/xla/tools/parser/hlo_parser_test.cc
@@ -214,7 +214,7 @@ R"(HloModule TwoSendRecvBothWayRecvFist_module:
ENTRY %TwoSendRecvBothWayRecvFist.v3 () -> f32[] {
%recv = f32[] recv(), channel_id=15, sharding={maximal device=1}
ROOT %constant = f32[] constant(2.1), sharding={maximal device=0}
- %send = () send(f32[] %constant), channel_id=16, sharding={maximal device=0}
+ %send = () send(f32[] %constant), channel_id=16, sharding={maximal device=0}, control-predecessors={%recv}
}
)"
diff --git a/tensorflow/compiler/xla/xla.proto b/tensorflow/compiler/xla/xla.proto
index ce3c3eee68..710bb6ff25 100644
--- a/tensorflow/compiler/xla/xla.proto
+++ b/tensorflow/compiler/xla/xla.proto
@@ -361,6 +361,7 @@ message WaitForExecutionResponse {
message IsConstantRequest {
ComputationHandle computation = 1;
ComputationDataHandle operand = 2;
+ int64 num_parameters = 3;
}
message IsConstantResponse {
@@ -371,6 +372,7 @@ message ComputeConstantRequest {
ComputationHandle computation = 1;
ComputationDataHandle operand = 2;
Layout output_layout = 3;
+ repeated LiteralProto parameters = 4;
}
message ComputeConstantResponse {
diff --git a/tensorflow/contrib/batching/adaptive_shared_batch_scheduler.h b/tensorflow/contrib/batching/adaptive_shared_batch_scheduler.h
index a0606427a5..6ed177e001 100644
--- a/tensorflow/contrib/batching/adaptive_shared_batch_scheduler.h
+++ b/tensorflow/contrib/batching/adaptive_shared_batch_scheduler.h
@@ -399,7 +399,7 @@ ASBSQueue<TaskType>::~ASBSQueue() {
template <typename TaskType>
Status ASBSQueue<TaskType>::Schedule(std::unique_ptr<TaskType>* task) {
- bool added_new_batch = false;
+ ASBSBatch<TaskType>* new_batch = nullptr;
size_t size = (*task)->size();
if (size > options_.max_batch_size) {
return errors::InvalidArgument("Task size ", size,
@@ -418,15 +418,14 @@ Status ASBSQueue<TaskType>::Schedule(std::unique_ptr<TaskType>* task) {
current_batch_ = nullptr;
}
if (!current_batch_) {
- added_new_batch = true;
num_enqueued_batches_++;
- current_batch_ =
+ current_batch_ = new_batch =
new ASBSBatch<TaskType>(this, scheduler_->GetEnv()->NowMicros());
}
current_batch_->AddTask(std::move(*task));
num_enqueued_tasks_++;
}
- if (added_new_batch) scheduler_->AddBatch(current_batch_);
+ if (new_batch != nullptr) scheduler_->AddBatch(new_batch);
return Status::OK();
}
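
The scheduler fix above is subtle: the batch to hand off is captured while the lock is still held instead of re-reading `current_batch_` afterwards, since a concurrent `Schedule` call may have replaced it in the meantime. A rough Python rendering of the fixed shape (illustrative types, not the batching library):

```python
import threading


class SketchQueue(object):
    """Only the call that creates a batch hands that batch to the scheduler;
    `_current_batch` is never re-read outside the lock."""

    def __init__(self, scheduler):
        self._lock = threading.Lock()
        self._current_batch = None
        self._scheduler = scheduler

    def schedule(self, task):
        new_batch = None
        with self._lock:
            if self._current_batch is None:
                # Remember the batch this call created while holding the lock.
                self._current_batch = new_batch = []
            self._current_batch.append(task)
        if new_batch is not None:  # only the creating call enqueues it
            self._scheduler.add_batch(new_batch)
```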
diff --git a/tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py b/tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py
index 4d9fd75323..cebe3474ca 100644
--- a/tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py
+++ b/tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py
@@ -208,7 +208,7 @@ def extract_features(features, feature_columns):
if tensor.dtype == dtypes.float32:
if len(tensor.shape) > 1 and tensor.shape[1] > 1:
unstacked = array_ops.unstack(tensor, axis=1)
- for i in xrange(len(unstacked)):
+ for i in range(len(unstacked)):
dense_float_names.append(_FEATURE_NAME_TEMPLATE % (key, i))
dense_floats.append(array_ops.reshape(unstacked[i], [-1, 1]))
else:
diff --git a/tensorflow/contrib/cmake/tf_python.cmake b/tensorflow/contrib/cmake/tf_python.cmake
index 68234911a3..4b60460cb2 100755
--- a/tensorflow/contrib/cmake/tf_python.cmake
+++ b/tensorflow/contrib/cmake/tf_python.cmake
@@ -224,6 +224,7 @@ add_python_module("tensorflow/python/grappler")
add_python_module("tensorflow/python/keras")
add_python_module("tensorflow/python/keras/activations")
add_python_module("tensorflow/python/keras/applications")
+add_python_module("tensorflow/python/keras/applications/inception_resnet_v2")
add_python_module("tensorflow/python/keras/applications/inception_v3")
add_python_module("tensorflow/python/keras/applications/mobilenet")
add_python_module("tensorflow/python/keras/applications/resnet50")
diff --git a/tensorflow/contrib/data/__init__.py b/tensorflow/contrib/data/__init__.py
index 6c46acf204..824ac4298f 100644
--- a/tensorflow/contrib/data/__init__.py
+++ b/tensorflow/contrib/data/__init__.py
@@ -30,6 +30,7 @@ See the @{$datasets$Importing Data} Programmer's Guide for an overview.
@@make_saveable_from_iterator
@@read_batch_features
@@unbatch
+@@parallel_interleave
@@rejection_resample
@@sloppy_interleave
@@ -50,6 +51,7 @@ from tensorflow.contrib.data.python.ops.dataset_ops import get_single_element
from tensorflow.contrib.data.python.ops.enumerate_ops import enumerate_dataset
from tensorflow.contrib.data.python.ops.error_ops import ignore_errors
from tensorflow.contrib.data.python.ops.grouping import group_by_window
+from tensorflow.contrib.data.python.ops.interleave_ops import parallel_interleave
from tensorflow.contrib.data.python.ops.interleave_ops import sloppy_interleave
from tensorflow.contrib.data.python.ops.iterator_ops import make_saveable_from_iterator
from tensorflow.contrib.data.python.ops.readers import FixedLengthRecordDataset
diff --git a/tensorflow/contrib/eager/python/examples/mnist/mnist.py b/tensorflow/contrib/eager/python/examples/mnist/mnist.py
index 3dd920415d..bfb7d5a900 100644
--- a/tensorflow/contrib/eager/python/examples/mnist/mnist.py
+++ b/tensorflow/contrib/eager/python/examples/mnist/mnist.py
@@ -191,9 +191,9 @@ def main(_):
train_dir = None
test_dir = None
summary_writer = tf.contrib.summary.create_summary_file_writer(
- train_dir, flush_secs=10)
+ train_dir, flush_millis=10000)
test_summary_writer = tf.contrib.summary.create_summary_file_writer(
- test_dir, flush_secs=10, name='test')
+ test_dir, flush_millis=10000, name='test')
checkpoint_prefix = os.path.join(FLAGS.checkpoint_dir, 'ckpt')
with tf.device(device):
diff --git a/tensorflow/contrib/eager/python/examples/rnn_colorbot/rnn_colorbot.py b/tensorflow/contrib/eager/python/examples/rnn_colorbot/rnn_colorbot.py
index 318962c634..609cbd2877 100644
--- a/tensorflow/contrib/eager/python/examples/rnn_colorbot/rnn_colorbot.py
+++ b/tensorflow/contrib/eager/python/examples/rnn_colorbot/rnn_colorbot.py
@@ -248,9 +248,9 @@ def main(_):
log_dir = os.path.join(FLAGS.dir, "summaries")
tf.gfile.MakeDirs(log_dir)
train_summary_writer = tf.contrib.summary.create_summary_file_writer(
- os.path.join(log_dir, "train"), flush_secs=10)
+ os.path.join(log_dir, "train"), flush_millis=10000)
test_summary_writer = tf.contrib.summary.create_summary_file_writer(
- os.path.join(log_dir, "eval"), flush_secs=10, name="eval")
+ os.path.join(log_dir, "eval"), flush_millis=10000, name="eval")
with tf.device(device):
for epoch in range(FLAGS.num_epochs):
diff --git a/tensorflow/contrib/estimator/BUILD b/tensorflow/contrib/estimator/BUILD
index a0f83ac105..6eb2cfdaca 100644
--- a/tensorflow/contrib/estimator/BUILD
+++ b/tensorflow/contrib/estimator/BUILD
@@ -7,6 +7,7 @@ package(
licenses(["notice"]) # Apache 2.0
load("//tensorflow:tensorflow.bzl", "py_test")
+load("//tensorflow:tensorflow.bzl", "cuda_py_test")
filegroup(
name = "all_files",
@@ -30,6 +31,7 @@ py_library(
":head",
":logit_fns",
":multi_head",
+ ":replicate_model_fn",
"//tensorflow/python:util",
],
)
@@ -227,9 +229,69 @@ py_test(
"//tensorflow/python:string_ops",
"//tensorflow/python/estimator:metric_keys",
"//tensorflow/python/estimator:model_fn",
- "//tensorflow/python/estimator:prediction_keys",
+ "//tensorflow/python/ops/losses",
"//tensorflow/python/saved_model:signature_constants",
"//third_party/py/numpy",
"@six_archive//:six",
],
)
+
+py_library(
+ name = "replicate_model_fn",
+ srcs = [
+ "python/estimator/replicate_model_fn.py",
+ ],
+ srcs_version = "PY2AND3",
+ deps = [
+ "//tensorflow/core:protos_all_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:control_flow_ops",
+ "//tensorflow/python:device",
+ "//tensorflow/python:device_lib",
+ "//tensorflow/python:framework_ops",
+ "//tensorflow/python:gradients",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:platform",
+ "//tensorflow/python:state_ops",
+ "//tensorflow/python:training",
+ "//tensorflow/python:variable_scope",
+ "//tensorflow/python:variables",
+ "//tensorflow/python/estimator:export_output",
+ "//tensorflow/python/estimator:model_fn",
+ "//tensorflow/python/estimator:util",
+ "@six_archive//:six",
+ ],
+)
+
+cuda_py_test(
+ name = "replicate_model_fn_test",
+ size = "small",
+ srcs = ["python/estimator/replicate_model_fn_test.py"],
+ additional_deps = [
+ "//tensorflow/python/estimator",
+ "//tensorflow/python/estimator:dnn",
+ "//tensorflow/python/estimator:export_export",
+ "//tensorflow/python/estimator:export_output",
+ "//tensorflow/python/estimator:model_fn",
+ "//tensorflow/python/estimator:numpy_io",
+ "//tensorflow/python/estimator:optimizers",
+ "//tensorflow/python/estimator:prediction_keys",
+ "//tensorflow/python/feature_column",
+ "//tensorflow/python/ops/losses",
+ "//tensorflow/python/saved_model:signature_constants",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:control_flow_ops",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:math_ops",
+ "//tensorflow/python:metrics",
+ "//tensorflow/python:platform",
+ "//tensorflow/python:summary",
+ "//tensorflow/python:training",
+ "//tensorflow/python:variable_scope",
+ "//tensorflow/python:variables",
+ ":replicate_model_fn",
+ ],
+ tags = ["requires-gpu-sm35"],
+)
diff --git a/tensorflow/contrib/estimator/python/estimator/replicate_model_fn.py b/tensorflow/contrib/estimator/python/estimator/replicate_model_fn.py
new file mode 100644
index 0000000000..7005a647db
--- /dev/null
+++ b/tensorflow/contrib/estimator/python/estimator/replicate_model_fn.py
@@ -0,0 +1,470 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Utilities to replicate model_fn's over local GPUs.
+
+This file contains utilities that allow replicating `Estimator.model_fn` over
+GPUs. A replicated version of a `model_fn` is returned that can subsequently
+be used with `Estimator`.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import copy
+
+import six
+
+from tensorflow.core.framework import node_def_pb2
+from tensorflow.python.client import device_lib
+from tensorflow.python.estimator import model_fn as model_fn_lib
+from tensorflow.python.estimator import util
+from tensorflow.python.estimator.export import export_output as export_output_lib
+from tensorflow.python.framework import device as framework_device
+from tensorflow.python.framework import ops as ops_lib
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import control_flow_ops
+from tensorflow.python.ops import gradients as gradients_lib
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import state_ops
+from tensorflow.python.ops import variable_scope
+from tensorflow.python.ops import variables as variables_lib
+from tensorflow.python.platform import tf_logging
+from tensorflow.python.training import training_util
+
+
+def replicate_model_fn(model_fn, optimizer_fn, devices=None):
+ """Replicate `Estimator.model_fn` over GPUs within a single host.
+
+ The given `model_fn` specifies a single forward pass of a model. To replicate
+ such a model over GPUs, each GPU gets its own instance of the forward pass
+ (a.k.a. a tower). The input features and labels get sharded into the chunks
+ that correspond to the number of GPUs. Each tower computes its own loss based
+ on its input. For each such loss, gradients are computed. After that, the
+ available losses are summed to form the aggregated loss. The available
+ gradients are summed too, and the summed gradients are then used to update
+ the weights with the specified optimizer.
+
+ If `devices` is `None`, then all available GPUs are going to be used for
+ replication. If no GPUs are available, then the model is going to be
+ placed on the CPU.
+
+ Two modes of local replication over available GPUs are supported:
+ 1) If exactly 1 GPU is detected, then variables and operations are placed
+ onto the GPU.
+ 2) If more than 1 GPU is detected, then variables are going to be placed on
+ the CPU. Replicas of operations are placed on each individual GPU.
+
+ Here is an example of how one might use their `model_fn` to run over GPUs:
+ ```python
+ def optimizer_fn():
+ return tf.train.GradientDescentOptimizer(learning_rate=0.001)
+ ...
+ def model_fn(...): # See `model_fn` in `Estimator`.
+ loss = ...
+ if mode == tf.estimator.ModeKeys.TRAIN:
+ # See the section below on `EstimatorSpec.train_op`.
+ return EstimatorSpec(mode=mode, loss=loss, train_op=tf.no_op())
+
+ # No change for `ModeKeys.EVAL` or `ModeKeys.PREDICT`.
+ return EstimatorSpec(...)
+ ...
+ classifier = tf.estimator.Estimator(
+ model_fn=replicate_model_fn.replicate_model_fn(model_fn, optimizer_fn))
+ ```
+
+ On `EstimatorSpec.train_op`:
+ `model_fn` returns `EstimatorSpec.train_op` for
+ `tf.estimator.ModeKeys.TRAIN`. It is typically derived using an optimizer.
+ `replicate_model_fn` ignores the returned `EstimatorSpec.train_op`, so there
+ is no need to use an optimizer inside the user's `model_fn`. The
+ `EstimatorSpec.loss` subgraph is going to be executed, while
+ `EstimatorSpec.train_op` isn't going to be executed. One could pass
+ `train_op=tf.no_op()` to `EstimatorSpec`.
+
+ On sharding input features and labels:
+ Input features and labels are split for consumption by each tower. They are
+ split across dimension 0. Features and labels need to be batch-major.
+
+ On reduction algorithms:
+ Certain algorithms were chosen for aggregating results of computations on
+ multiple towers:
+ - Losses from all towers are reduced using sum.
+ - Gradients are reduced using sum for each trainable variable.
+ - `eval_metric_ops` are reduced per metric using `reduce_mean`.
+ - `EstimatorSpec.predictions` and `EstimatorSpec.export_outputs` are
+ reduced using concatenation.
+ - For all other fields of `EstimatorSpec` the values of the first tower
+ are taken.
+
+ On replication of variables:
+ Variables are not duplicated between towers. Instead, they are placed on a
+ single device as defined above and shared across towers.
+
+ Other current limitations:
+ - `predictions` are not supported for `ModeKeys.EVAL`. That support is
+ required for `tf.contrib.estimator.add_metrics`.
+
+ Args:
+ model_fn: `model_fn` as defined in `Estimator`. See the section above about
+ the train_op argument of `EstimatorSpec`.
+ optimizer_fn: a function that returns an optimizer instance. The function
+ may accept one `params` argument. This is the `params` argument as
+ defined by `Estimator`. See the `Estimator` documentation for details.
+ devices: Optional list of devices to replicate the model across. This
+ argument can be used to replicate only on a subset of the available GPUs.
+ If `None`, then all available GPUs are going to be used for replication.
+ If no GPUs are available, then the model is going to be placed on the CPU.
+
+ Returns:
+ A replicated version of the supplied `model_fn`. The returned function
+ conforms to the requirements of `Estimator`'s `model_fn` and can be used
+ instead of the supplied `model_fn`.
+ """
+ if not devices:
+ devices = _get_local_devices('GPU') or _get_local_devices('CPU')
+
+ is_a_single_gpu_case = len(devices) == 1 and 'GPU' in devices[0]
+ local_ps_device = '/{}:0'.format('GPU' if is_a_single_gpu_case else 'CPU')
+
+ tf_logging.info('Replicating the `model_fn` across {}. Local parameter '
+ 'server device is going to be {}.'.format(
+ devices, local_ps_device))
+
+ def replicated_model_fn(mode, features, labels, params=None, config=None):
+ """Replicated version of `model_fn` to be used instead."""
+ feature_shards, label_shards = _split_batch(
+ features, labels, len(devices), device=local_ps_device)
+ tower_specs = _get_loss_towers(
+ model_fn=model_fn,
+ mode=mode,
+ features=feature_shards,
+ labels=label_shards,
+ params=params,
+ config=config,
+ devices=devices,
+ local_ps_device=local_ps_device)
+
+ if mode == model_fn_lib.ModeKeys.TRAIN:
+ train_op = _minimize_towers(tower_specs,
+ _call_optimizer_fn(optimizer_fn, params))
+ return _train_spec(
+ tower_specs, train_op, aggregation_device=local_ps_device)
+ elif mode == model_fn_lib.ModeKeys.EVAL:
+ return _eval_spec(tower_specs, aggregation_device=local_ps_device)
+ elif mode == model_fn_lib.ModeKeys.PREDICT:
+ return _predict_spec(tower_specs, aggregation_device=local_ps_device)
+
+ return replicated_model_fn
+
+
+def _get_local_devices(device_type):
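+ """Return the names of all local devices of the given `device_type`."""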
+ local_device_protos = device_lib.list_local_devices()
+ return [
+ device.name
+ for device in local_device_protos
+ if device.device_type == device_type
+ ]
+
+
+def _split_batch(features, labels, number_of_shards, device):
+ """Split input features and labes into batches."""
+
+ def split_dictionary(dictionary):
+ shards = [{} for _ in range(number_of_shards)]
+ for name, tensor in six.iteritems(dictionary):
+ for i, shard in enumerate(array_ops.split(tensor, number_of_shards)):
+ shards[i][name] = shard
+ return shards
+
+ with ops_lib.name_scope('split_inputs'):
+ with ops_lib.device(device):
+ if isinstance(features, dict):
+ feature_shards = split_dictionary(features)
+ else:
+ feature_shards = array_ops.split(features, number_of_shards)
+
+ if labels is None:
+ label_shards = None
+ elif isinstance(labels, dict):
+ label_shards = split_dictionary(labels)
+ else:
+ label_shards = array_ops.split(labels, number_of_shards)
+ return feature_shards, label_shards
+
+
+_DEFAULT_NAME_SCOPE_PATTERN = 'tower_{}'
+
+
+def _get_loss_towers(model_fn,
+ mode,
+ features,
+ labels,
+ params,
+ config,
+ devices,
+ local_ps_device,
+ name_scope_pattern=_DEFAULT_NAME_SCOPE_PATTERN):
+ """Replicate the loss computation across devices."""
+ tower_specs = []
+
+ model_fn_args = util.fn_args(model_fn)
+ optional_params = {}
+ if 'params' in model_fn_args:
+ optional_params['params'] = copy.deepcopy(params)
+ if 'config' in model_fn_args:
+ optional_params['config'] = copy.deepcopy(config)
+
+ for i, device in enumerate(devices):
+ is_the_first_tower = (i == 0)
+
+ device_setter = _local_device_setter(
+ worker_device=device, ps_device=local_ps_device)
+
+ # We would like to preserve the names of the variables and ops that a user
+ # might be relying on. Names without a prefix resolve to the variables and
+ # ops of the first tower.
+ name_scope = name_scope_pattern
+ if is_the_first_tower:
+ name_scope = ''
+
+ with variable_scope.variable_scope('', reuse=not is_the_first_tower):
+ with ops_lib.name_scope(name_scope.format(i)):
+ with ops_lib.device(device_setter):
+ labels_shard = None
+ if labels:
+ labels_shard = labels[i]
+
+ tower_specs.append(
+ model_fn(
+ mode=mode,
+ features=features[i],
+ labels=labels_shard,
+ **optional_params))
+ return tower_specs
+
+
+def _local_device_setter(ps_device, worker_device):
+ """A device setter that puts distributes Var/Ops to PS/workers."""
+ ps_ops = ['Variable', 'VariableV2', 'VarHandleOp']
+
+ def local_device_chooser(op):
+ current_device = framework_device.DeviceSpec.from_string(op.device or '')
+
+ node_def = op if isinstance(op, node_def_pb2.NodeDef) else op.node_def
+ if node_def.op in ps_ops:
+ ps_device_spec = framework_device.DeviceSpec.from_string(
+ '{}'.format(ps_device))
+
+ ps_device_spec.merge_from(current_device)
+ return ps_device_spec.to_string()
+ else:
+ worker_device_spec = framework_device.DeviceSpec.from_string(
+ worker_device or '')
+ worker_device_spec.merge_from(current_device)
+ return worker_device_spec.to_string()
+
+ return local_device_chooser
+
+
+def _minimize_towers(tower_specs, optimizer):
+ """Aggregate and apply gradients for computed losses."""
+ grad_lists = {}
+ for tower_spec in tower_specs:
+ with ops_lib.device(tower_spec.loss.device):
+ variables = variables_lib.trainable_variables()
+ gradients = gradients_lib.gradients(tower_spec.loss, variables)
+
+ for var, grad in zip(variables, gradients):
+ if grad is not None:
+ grad_lists.setdefault(var, []).append(grad)
+
+ aggregated_grads = []
+ with ops_lib.name_scope('gradient_aggregating'):
+ for var, grads in six.iteritems(grad_lists):
+ grad = _compute_sum_on_device(grads, var.device)
+ aggregated_grads.append((grad, var))
+
+ train_op = optimizer.apply_gradients(
+ aggregated_grads, global_step=training_util.get_global_step())
+
+ return train_op
+
+
+def _call_optimizer_fn(optimizer_fn, params):
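+ """Call `optimizer_fn`, passing `params` only if its signature accepts it."""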
+ arguments = {}
+ optimizer_fn_arguments = util.fn_args(optimizer_fn)
+ if 'params' in optimizer_fn_arguments:
+ arguments['params'] = params
+ return optimizer_fn(**arguments)
+
+
+def _compute_sum_on_device(values, device, name=None):
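+ """Sum `values` with a single `add_n` op placed on `device`."""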
+ with ops_lib.device(device):
+ return math_ops.add_n(values, name=name)
+
+
+def _train_spec(tower_specs,
+ train_op,
+ aggregation_device,
+ aggregated_loss_name='loss'):
+ """Populate replicated EstimatorSpec for `GraphKeys.TRAIN`."""
+ estimator_spec = tower_specs[0]._asdict()
+ estimator_spec['mode'] = model_fn_lib.ModeKeys.TRAIN
+ estimator_spec['train_op'] = train_op
+ estimator_spec['loss'] = _compute_sum_on_device(
+ [spec.loss for spec in tower_specs], aggregation_device,
+ aggregated_loss_name)
+ return model_fn_lib.EstimatorSpec(**estimator_spec)
+
+
+def _eval_spec(tower_specs, aggregation_device, aggregated_loss_name='loss'):
+ """Populate replicated EstimatorSpec for `GraphKeys.EVAL`."""
+ estimator_spec = tower_specs[0]._asdict()
+ estimator_spec['mode'] = model_fn_lib.ModeKeys.EVAL
+ estimator_spec['loss'] = _compute_sum_on_device(
+ [spec.loss for spec in tower_specs], aggregation_device,
+ aggregated_loss_name)
+
+ eval_metric_ops_lists = {}
+ for tower_spec in tower_specs:
+ metrics = tower_spec.eval_metric_ops or {}
+ for name, (_, update_op) in six.iteritems(metrics):
+ update_ops = eval_metric_ops_lists.setdefault(name, [])
+ update_ops.append(update_op)
+
+ eval_metric_ops = {}
+ for name, (metric_tensor, _) in six.iteritems(tower_specs[0].eval_metric_ops):
+ with ops_lib.control_dependencies(eval_metric_ops_lists[name]):
+ # This operation reduces the local variables across all metrics, yet it
+ # is called once per metric. That is redundant, but it is hard to know
+ # which local variables correspond to which metric. Estimator is going to
+ # execute all `reduced_update_op`s as part of a group inside a single
+ # `Session.run()` call, which avoids duplicate computation.
+ reduced_update_op = _reduce_metric_variables(len(tower_specs))
+ eval_metric_ops[name] = (metric_tensor, reduced_update_op)
+
+ estimator_spec['eval_metric_ops'] = eval_metric_ops
+ return model_fn_lib.EstimatorSpec(**estimator_spec)
+
+
+def _reduce_metric_variables(number_of_towers):
+ """Aggregate local variables used in metrics into the first tower."""
+ if number_of_towers == 1:
+ return control_flow_ops.no_op()
+
+ metric_variables = ops_lib.get_collection(ops_lib.GraphKeys.METRIC_VARIABLES)
+ variables_per_tower = len(metric_variables) // number_of_towers
+
+ if len(metric_variables) % number_of_towers != 0:
+ raise ValueError(
+ 'Different `EstimatorSpec.eval_metric_ops` across `model_fn()` calls.'
+ ' Expected {} local variables, but got {} instead.'.format(
+ variables_per_tower * number_of_towers, len(metric_variables)))
+
+ # `metric_variables` has size `variables_per_tower` * `number_of_towers`.
+ # Each tower is produced by calling the same model_fn, so the first
+ # `variables_per_tower` variables belong to the first tower. Variable `j`
+ # of the first tower has a replica at position `j + variables_per_tower * i`
+ # for every `i` in `[1, number_of_towers)`. We add the replica values into
+ # each first-tower variable and then zero out the replicas, so that
+ # `_reduce_metric_variables` is idempotent. If a metric is then computed
+ # based on local variables from the first tower, the resulting metric is
+ # an estimate for all `number_of_towers` towers.
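+ # For example, with 2 towers and 2 metric variables per tower the
+ # collection is [a0, b0, a1, b1]: a1 (index 2 = 0 + 2) is added into a0
+ # and then zeroed out, and likewise b1 into b0.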
+ ops = []
+ for i in range(0, variables_per_tower):
+ next_replica_id = i + variables_per_tower
+ replicas = [
+ metric_variables[replica_id]
+ for replica_id in range(next_replica_id, len(metric_variables),
+ variables_per_tower)
+ ] # `replicas` doesn't contain the first-tower variable.
+
+ reduce_op = state_ops.assign_add(metric_variables[i],
+ math_ops.add_n(replicas))
+
+ with ops_lib.control_dependencies([reduce_op]):
+ for replica in replicas:
+ zeros_for_replica = array_ops.zeros(
+ array_ops.shape(replica), dtype=replica.dtype)
+ zero_out_replica_op = state_ops.assign(replica, zeros_for_replica)
+ ops.append(zero_out_replica_op)
+
+ return control_flow_ops.group(*ops)
+
+
+def _predict_spec(tower_specs, aggregation_device):
+ """Populate replicated EstimatorSpec for `GraphKeys.PREDICT`."""
+ estimator_spec = tower_specs[0]._asdict()
+ estimator_spec['mode'] = model_fn_lib.ModeKeys.PREDICT
+
+ with ops_lib.device(aggregation_device):
+ estimator_spec['predictions'] = _concat_tensor_dicts(
+ *[tower_spec.predictions for tower_spec in tower_specs])
+
+ export_outputs_dict = _dict_concat(
+ *[tower_spec.export_outputs for tower_spec in tower_specs])
+
+ export_outputs = {}
+ for name, export_output_list in six.iteritems(export_outputs_dict):
+ if isinstance(export_output_list[0], export_output_lib.PredictOutput):
+ export_outputs[name] = export_output_lib.PredictOutput(
+ outputs=_concat_tensor_dicts(*[
+ export_output.outputs for export_output in export_output_list
+ ]))
+ elif isinstance(export_output_list[0],
+ export_output_lib.RegressionOutput):
+ export_outputs[name] = export_output_lib.RegressionOutput(
+ value=array_ops.concat(
+ [export_output.value for export_output in export_output_list],
+ axis=0))
+ elif isinstance(export_output_list[0],
+ export_output_lib.ClassificationOutput):
+ scores = None
+ if export_output_list[0].scores is not None:
+ scores = array_ops.concat(
+ [export_output.scores for export_output in export_output_list],
+ axis=0)
+
+ classes = None
+ if export_output_list[0].classes is not None:
+ classes = array_ops.stack(
+ [export_output.classes for export_output in export_output_list],
+ axis=0)
+
+ export_outputs[name] = export_output_lib.ClassificationOutput(
+ scores=scores, classes=classes)
+
+ estimator_spec['export_outputs'] = export_outputs
+ return model_fn_lib.EstimatorSpec(**estimator_spec)
+
+
+def _concat_tensor_dicts(*tensor_dicts):
+ return {
+ name: array_ops.concat(tensors, axis=0, name=name)
+ for name, tensors in six.iteritems(_dict_concat(*tensor_dicts))
+ }
+
+
+def _dict_concat(*dicts):
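+ """Merges dicts into a dict of lists.
+
+ For example, `_dict_concat({'a': 1}, {'a': 2, 'b': 3})` returns
+ `{'a': [1, 2], 'b': [3]}`.
+ """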
+ list_dict = {}
+ for d in dicts:
+ if d is None:
+ continue
+
+ for k, v in six.iteritems(d):
+ list_dict.setdefault(k, []).append(v)
+ return list_dict
diff --git a/tensorflow/contrib/estimator/python/estimator/replicate_model_fn_test.py b/tensorflow/contrib/estimator/python/estimator/replicate_model_fn_test.py
new file mode 100644
index 0000000000..10b47fba5a
--- /dev/null
+++ b/tensorflow/contrib/estimator/python/estimator/replicate_model_fn_test.py
@@ -0,0 +1,901 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for utilities that replicate `Estimator.model_fn` over GPUs."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import re
+import shutil
+import tempfile
+import numpy as np
+import six
+
+from tensorflow.contrib.estimator.python.estimator import replicate_model_fn
+from tensorflow.python.estimator import estimator as estimator_lib
+from tensorflow.python.estimator import model_fn as model_fn_lib
+from tensorflow.python.estimator.canned import dnn
+from tensorflow.python.estimator.canned import optimizers
+from tensorflow.python.estimator.canned import prediction_keys
+from tensorflow.python.estimator.export import export
+from tensorflow.python.estimator.export import export_output
+from tensorflow.python.estimator.inputs import numpy_io
+from tensorflow.python.feature_column import feature_column
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops as ops_lib
+from tensorflow.python.framework import test_util
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import control_flow_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import metrics as metrics_lib
+from tensorflow.python.ops import variable_scope
+from tensorflow.python.ops import variables
+from tensorflow.python.ops.losses import losses
+from tensorflow.python.platform import gfile
+from tensorflow.python.platform import test
+from tensorflow.python.saved_model import signature_constants
+from tensorflow.python.summary.writer import writer_cache
+from tensorflow.python.training import gradient_descent
+
+
+class DNNClassifierIntegrationTest(test_util.TensorFlowTestCase):
+
+ def setUp(self):
+ self._model_dir = tempfile.mkdtemp()
+
+ def test_complete_flow(self):
+ n_classes = 3
+ input_dimension = 2
+ batch_size = 12
+
+ data = np.linspace(
+ 0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
+ x_data = data.reshape(batch_size, input_dimension)
+ y_data = np.reshape(self._as_label(data[:batch_size]), (batch_size, 1))
+ train_input_fn = numpy_io.numpy_input_fn(
+ x={'x': x_data},
+ y=y_data,
+ batch_size=batch_size,
+ num_epochs=None,
+ shuffle=True)
+ eval_input_fn = numpy_io.numpy_input_fn(
+ x={'x': x_data}, y=y_data, batch_size=batch_size, shuffle=False)
+ predict_input_fn = numpy_io.numpy_input_fn(
+ x={'x': x_data}, batch_size=batch_size, shuffle=False)
+
+ feature_columns = [
+ feature_column.numeric_column('x', shape=(input_dimension,))
+ ]
+
+ estimator = dnn.DNNClassifier(
+ hidden_units=(2, 2),
+ feature_columns=feature_columns,
+ n_classes=n_classes,
+ model_dir=self._model_dir)
+
+ def optimizer_fn():
+ return optimizers.get_optimizer_instance('Adagrad', learning_rate=0.05)
+
+ # TODO(isaprykin): Switch Estimator to use allow_soft_placement=True
+ # during export_savedmodel and then switch this test to replicate over
+ # GPUs instead of CPUs.
+ estimator = estimator_lib.Estimator(
+ model_fn=replicate_model_fn.replicate_model_fn(
+ estimator.model_fn,
+ optimizer_fn,
+ devices=['/cpu:0', '/cpu:0', '/cpu:0']),
+ model_dir=estimator.model_dir,
+ config=estimator.config,
+ params=estimator.params)
+
+ num_steps = 10
+ estimator.train(train_input_fn, steps=num_steps)
+
+ scores = estimator.evaluate(eval_input_fn)
+ self.assertEqual(num_steps, scores[ops_lib.GraphKeys.GLOBAL_STEP])
+ self.assertIn('loss', six.iterkeys(scores))
+
+ predicted_proba = np.array([
+ x[prediction_keys.PredictionKeys.PROBABILITIES]
+ for x in estimator.predict(predict_input_fn)
+ ])
+ self.assertAllEqual((batch_size, n_classes), predicted_proba.shape)
+
+ feature_spec = feature_column.make_parse_example_spec(feature_columns)
+ serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
+ feature_spec)
+ export_dir = estimator.export_savedmodel(tempfile.mkdtemp(),
+ serving_input_receiver_fn)
+ self.assertTrue(gfile.Exists(export_dir))
+
+ def _as_label(self, data_in_float):
+ return np.rint(data_in_float).astype(np.int64)
+
+ def tearDown(self):
+ if self._model_dir:
+ writer_cache.FileWriterCache.clear()
+ shutil.rmtree(self._model_dir)
+
+
+class ReplicateModelTest(test_util.TensorFlowTestCase):
+
+ def model_fn(self, mode, features, labels, params):
+ c = variable_scope.get_variable(
+ 'c',
+ initializer=constant_op.constant(10, dtype=dtypes.float64),
+ dtype=dtypes.float64)
+
+ predictions = math_ops.multiply(features, c)
+
+ loss = None
+ if mode is not model_fn_lib.ModeKeys.PREDICT:
+ loss = losses.absolute_difference(
+ labels=labels,
+ predictions=predictions,
+ reduction=losses.Reduction.SUM)
+ loss = math_ops.reduce_sum(loss)
+
+ metrics = {
+ 'accuracy': metrics_lib.accuracy(labels, predictions),
+ 'auc': metrics_lib.auc(labels, predictions)
+ }
+
+ return model_fn_lib.EstimatorSpec(
+ mode=mode,
+ loss=loss,
+ eval_metric_ops=metrics,
+ predictions={'probabilities': predictions},
+ train_op=control_flow_ops.no_op()) # This train_op isn't actually used.
+
+ def optimizer_fn(self, params):
+ return gradient_descent.GradientDescentOptimizer(params['learning_rate'])
+
+ @property
+ def params(self):
+ params = {}
+ params['learning_rate'] = 1.0
+ return params
+
+ def test_train(self):
+ features = np.array([[1.0], [2.0]])
+ labels = np.array([[1.0], [2.0]])
+
+ with self.test_session() as session:
+ replicated_model_fn = replicate_model_fn.replicate_model_fn(
+ self.model_fn, self.optimizer_fn, devices=['/gpu:0', '/gpu:1'])
+ estimator_spec = replicated_model_fn(model_fn_lib.ModeKeys.TRAIN,
+ features, labels, self.params)
+ session.run(variables.global_variables_initializer())
+
+ # loss = feature * c - label
+ total_loss = (1.0 * 10 - 1.0) + (2.0 * 10 - 2.0)
+ self.assertEqual(total_loss, session.run(estimator_spec.loss))
+
+ # The gradient of the loss with respect to c is 1.0 + 2.0 = 3.
+ # New value of c = 10 - learning_rate * 3 = 7.0.
+ session.run(estimator_spec.train_op)
+ with variable_scope.variable_scope('', reuse=True):
+ c = variable_scope.get_variable('c', dtype=dtypes.float64)
+ self.assertEqual(7.0, session.run(c))
+
+ def test_train_spec_with_optimizer_without_params(self):
+
+ def optimizer_fn_without_params():
+ return gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
+
+ features = np.array([[1.0], [2.0]])
+ labels = np.array([[1.0], [2.0]])
+
+ with self.test_session() as session: # pylint: disable=unused-variable
+ replicated_model_fn = replicate_model_fn.replicate_model_fn(
+ self.model_fn,
+ optimizer_fn_without_params,
+ devices=['/gpu:0', '/gpu:1'])
+ # This call is going to fail if `replicated_model_fn` is still passing
+ # `params` into `optimizer_fn`, even though the latter doesn't take any:
+ estimator_spec = replicated_model_fn(model_fn_lib.ModeKeys.TRAIN,
+ features, labels, self.params)
+ del estimator_spec
+
+ def test_eval(self):
+ features = np.array([[0.01], [0.002]])
+ labels = np.array([[0.01], [0.02]])
+
+ with self.test_session() as session:
+ replicated_model_fn = replicate_model_fn.replicate_model_fn(
+ self.model_fn, self.optimizer_fn, devices=['/gpu:0', '/gpu:1'])
+ estimator_spec = replicated_model_fn(model_fn_lib.ModeKeys.EVAL, features,
+ labels, self.params)
+ session.run(variables.local_variables_initializer())
+ session.run(variables.global_variables_initializer())
+
+ accuracy, a = estimator_spec.eval_metric_ops['accuracy']
+ auc, b = estimator_spec.eval_metric_ops['auc']
+
+ session.run([a, b])
+ accuracy = session.run(accuracy)
+ auc = session.run(auc)
+
+ # Accuracy is 0.0 (no match) in the first tower.
+ # Accuracy is 1.0 (match) in the second tower, since the feature
+ # times weight "c" happened to be equal to the label.
+ total_loss = ((0.01 * 10 - 0.01) + (0.002 * 10 - 0.02))
+
+ self.assertNear((0.0 + 1.0) / 2.0, accuracy, 0.01)
+ self.assertEqual(0, auc)
+ self.assertNear(total_loss, session.run(estimator_spec.loss), 0.01)
+
+ def test_predict(self):
+ features = np.array([[0.01], [0.002]])
+ labels = np.array([[0.01], [0.02]])
+
+ with self.test_session() as session:
+ replicated_model_fn = replicate_model_fn.replicate_model_fn(
+ self.model_fn, self.optimizer_fn, devices=['/gpu:0', '/gpu:1'])
+ estimator_spec = replicated_model_fn(model_fn_lib.ModeKeys.PREDICT,
+ features, labels, self.params)
+ session.run(variables.global_variables_initializer())
+
+ self.assertAllClose({
+ 'probabilities': np.array([[0.1], [0.02]])
+ }, session.run(estimator_spec.predictions))
+
+ def test_train_single_tower(self):
+ features = np.array([[1.0], [2.0]])
+ labels = np.array([[1.0], [2.0]])
+
+ with self.test_session() as session:
+ replicated_model_fn = replicate_model_fn.replicate_model_fn(
+ self.model_fn, self.optimizer_fn)
+ estimator_spec = replicated_model_fn(model_fn_lib.ModeKeys.TRAIN,
+ features, labels, self.params)
+ session.run(variables.global_variables_initializer())
+
+ # loss = feature * c - label
+ total_loss = (1.0 * 10 - 1.0) + (2.0 * 10 - 2.0)
+ self.assertEqual(total_loss, session.run(estimator_spec.loss))
+
+ # The gradient of the loss with respect to c is 1.0 + 2.0 = 3.
+ # New value of c = 10 - learning_rate * 3 = 7.0.
+ session.run(estimator_spec.train_op)
+ with variable_scope.variable_scope('', reuse=True):
+ c = variable_scope.get_variable('c', dtype=dtypes.float64)
+ self.assertEqual(7.0, session.run(c))
+
+ def test_eval_single_tower(self):
+ features = np.array([[0.01], [0.002]])
+ labels = np.array([[0.01], [0.02]])
+
+ with self.test_session() as session:
+ replicated_model_fn = replicate_model_fn.replicate_model_fn(
+ self.model_fn, self.optimizer_fn, devices=['/gpu:0'])
+ estimator_spec = replicated_model_fn(model_fn_lib.ModeKeys.EVAL, features,
+ labels, self.params)
+ session.run(variables.local_variables_initializer())
+ session.run(variables.global_variables_initializer())
+
+ accuracy, a = estimator_spec.eval_metric_ops['accuracy']
+ auc, b = estimator_spec.eval_metric_ops['auc']
+
+ session.run([a, b])
+ accuracy = session.run(accuracy)
+ auc = session.run(auc)
+
+ # With a single tower the batch isn't split. The first example is a miss
+ # (prediction 0.1 vs label 0.01) and the second is a match (0.02 vs 0.02),
+ # since the feature times weight "c" happened to equal the label.
+ total_loss = ((0.01 * 10 - 0.01) + (0.002 * 10 - 0.02))
+
+ self.assertNear((0.0 + 1.0) / 2.0, accuracy, 0.01)
+ self.assertEqual(0, auc)
+ self.assertNear(total_loss, session.run(estimator_spec.loss), 0.01)
+
+ def test_predict_single_tower(self):
+ features = np.array([[0.01], [0.002]])
+ labels = np.array([[0.01], [0.02]])
+
+ with self.test_session() as session:
+ replicated_model_fn = replicate_model_fn.replicate_model_fn(
+ self.model_fn, self.optimizer_fn, devices=['/gpu:0'])
+ estimator_spec = replicated_model_fn(model_fn_lib.ModeKeys.PREDICT,
+ features, labels, self.params)
+ session.run(variables.global_variables_initializer())
+
+ self.assertAllClose({
+ 'probabilities': np.array([[0.1], [0.02]])
+ }, session.run(estimator_spec.predictions))
+
+
+class GetLossTowersTest(test_util.TensorFlowTestCase):
+
+ def model_fn(self, mode, features, labels, params):
+ c = variable_scope.get_variable(
+ 'c',
+ initializer=constant_op.constant(0.25, dtype=dtypes.float64),
+ dtype=dtypes.float64)
+
+ predictions = math_ops.add(np.array([0.1, 0.2, 0.3, features[0]]), c)
+ labels = np.array([0.1, 0.2, 0.3, labels[0]])
+
+ loss = losses.absolute_difference(
+ labels=labels, predictions=predictions, reduction=losses.Reduction.SUM)
+
+ return model_fn_lib.EstimatorSpec(mode=mode, loss=math_ops.reduce_sum(loss))
+
+ def test_gradients_are_computed(self):
+ with self.test_session() as session:
+ tower_specs = replicate_model_fn._get_loss_towers(
+ self.model_fn,
+ mode=None,
+ features=[[0.6], [1.6]],
+ labels=[[0.6], [0.6]],
+ params=None,
+ config=None,
+ devices=['/gpu:0', '/gpu:1'],
+ local_ps_device='/gpu:0',
+ name_scope_pattern='test_tower_{}')
+ session.run(variables.global_variables_initializer())
+
+ self.assertEqual(len(tower_specs), 2)
+
+ self.assertEqual('/device:GPU:0', tower_specs[0].loss.device)
+ self.assertEqual('Sum:0', tower_specs[0].loss.name)
+ self.assertEqual(1.0, session.run(tower_specs[0].loss))
+
+ self.assertEqual('/device:GPU:1', tower_specs[1].loss.device)
+ self.assertEqual('test_tower_1/Sum:0', tower_specs[1].loss.name)
+ # The input batch for the second tower is 1.0 bigger (1.6 vs 0.6), so its
+ # loss is 1.0 bigger as well.
+ self.assertEqual(2.0, session.run(tower_specs[1].loss))
+
+ self.assertEqual(1, len(variables.global_variables()))
+ self.assertEqual(1, len(variables.trainable_variables()))
+
+ with variable_scope.variable_scope('', reuse=True):
+ c = variable_scope.get_variable('c', dtype=dtypes.float64)
+ self.assertEqual(0.25, session.run(c))
+
+
+class SplitBatchTest(test_util.TensorFlowTestCase):
+
+ def evaluate_shards(self, first_list, second_list):
+ evaluate_items = lambda x: x.eval()
+ return list(map(evaluate_items, first_list)), list(
+ map(evaluate_items, second_list))
+
+ def test_simple_half_split(self):
+ with self.test_session() as session: # pylint: disable=unused-variable
+ features = [0.0, 1.0, 2.0, 3.0]
+ labels = [10.0, 11.0, 12.0, 13.0]
+ feature_shards, label_shards = replicate_model_fn._split_batch(
+ features, labels, 2, device='/gpu:0')
+
+ feature_shards, label_shards = self.evaluate_shards(
+ feature_shards, label_shards)
+
+ self.assertAllEqual([[0.0, 1.0], [2.0, 3.0]], feature_shards)
+ self.assertAllEqual([[10.0, 11.0], [12.0, 13.0]], label_shards)
+
+ def test_to_each_their_own(self):
+ with self.test_session() as session: # pylint: disable=unused-variable
+ features = [0.0, 1.0, 2.0, 3.0]
+ labels = [10.0, 11.0, 12.0, 13.0]
+ feature_shards, label_shards = replicate_model_fn._split_batch(
+ features, labels, 4, device='/gpu:0')
+
+ feature_shards, label_shards = self.evaluate_shards(
+ feature_shards, label_shards)
+
+ self.assertAllEqual([[0.0], [1.0], [2.0], [3.0]], feature_shards)
+ self.assertAllEqual([[10.0], [11.0], [12.0], [13.0]], label_shards)
+
+ def test_one_batch(self):
+ with self.test_session() as session: # pylint: disable=unused-variable
+ features = [0.0, 1.0, 2.0, 3.0]
+ labels = [10.0, 11.0, 12.0, 13.0]
+ feature_shards, label_shards = replicate_model_fn._split_batch(
+ features, labels, 1, device='/gpu:0')
+
+ feature_shards, label_shards = self.evaluate_shards(
+ feature_shards, label_shards)
+
+ self.assertAllEqual([[0.0, 1.0, 2.0, 3.0]], feature_shards)
+ self.assertAllEqual([[10.0, 11.0, 12.0, 13.0]], label_shards)
+
+ def test_half_split_in_dictionary(self):
+ with self.test_session() as session: # pylint: disable=unused-variable
+ features = {'first': [0.0, 1.0, 2.0, 3.0], 'second': [4.0, 5.0, 6.0, 7.0]}
+ labels = [10.0, 11.0, 12.0, 13.0]
+
+ feature_shards, label_shards = replicate_model_fn._split_batch(
+ features, labels, 2, device='/gpu:0')
+
+ self.assertAllEqual([0.0, 1.0], feature_shards[0]['first'].eval())
+ self.assertAllEqual([4.0, 5.0], feature_shards[0]['second'].eval())
+ self.assertAllEqual([2.0, 3.0], feature_shards[1]['first'].eval())
+ self.assertAllEqual([6.0, 7.0], feature_shards[1]['second'].eval())
+ self.assertAllEqual([10.0, 11.0], label_shards[0].eval())
+ self.assertAllEqual([12.0, 13.0], label_shards[1].eval())
+
+ def test_one_batch_in_dictionary(self):
+ with self.test_session() as session: # pylint: disable=unused-variable
+ features = {'first': [0.0, 1.0, 2.0, 3.0], 'second': [4.0, 5.0, 6.0, 7.0]}
+ labels = [10.0, 11.0, 12.0, 13.0]
+
+ feature_shards, label_shards = replicate_model_fn._split_batch(
+ features, labels, 1, device='/gpu:0')
+
+ self.assertAllEqual([0.0, 1.0, 2.0, 3.0],
+ feature_shards[0]['first'].eval())
+ self.assertAllEqual([4.0, 5.0, 6.0, 7.0],
+ feature_shards[0]['second'].eval())
+ self.assertAllEqual([10.0, 11.0, 12.0, 13.0], label_shards[0].eval())
+
+ def test_feature_and_label_dictionaries(self):
+ with self.test_session() as session: # pylint: disable=unused-variable
+ features = {'first': [0.0, 1.0, 2.0, 3.0], 'second': [4.0, 5.0, 6.0, 7.0]}
+ labels = {'first': [10.0, 11.0], 'second': [12.0, 13.0]}
+
+ feature_shards, label_shards = replicate_model_fn._split_batch(
+ features, labels, 2, device='/gpu:0')
+
+ self.assertAllEqual([0.0, 1.0], feature_shards[0]['first'].eval())
+ self.assertAllEqual([4.0, 5.0], feature_shards[0]['second'].eval())
+ self.assertAllEqual([2.0, 3.0], feature_shards[1]['first'].eval())
+ self.assertAllEqual([6.0, 7.0], feature_shards[1]['second'].eval())
+ self.assertAllEqual([10.0], label_shards[0]['first'].eval())
+ self.assertAllEqual([12.0], label_shards[0]['second'].eval())
+ self.assertAllEqual([11.0], label_shards[1]['first'].eval())
+ self.assertAllEqual([13.0], label_shards[1]['second'].eval())
+
+
+class TrainSpecTest(test_util.TensorFlowTestCase):
+
+ expected_predictions = {}
+
+ def create_estimator_spec(self, loss):
+ return model_fn_lib.EstimatorSpec(
+ mode=model_fn_lib.ModeKeys.TRAIN,
+ loss=loss,
+ train_op=loss, # Not used; currently required.
+ predictions=self.expected_predictions)
+
+ def create_constant_loss(self, loss_value):
+ return constant_op.constant(loss_value, dtype=dtypes.float64)
+
+ def test_example(self):
+ with self.test_session() as session:
+ tower_losses = list(map(self.create_constant_loss, [2, 4, 6]))
+ tower_specs = list(map(self.create_estimator_spec, tower_losses))
+
+ expected_train_op = tower_losses[1]
+
+ estimator_spec = replicate_model_fn._train_spec(
+ tower_specs, expected_train_op, aggregation_device='/gpu:0')
+
+ self.assertEqual(expected_train_op, estimator_spec.train_op)
+ self.assertEqual(2 + 4 + 6, session.run(estimator_spec.loss))
+ self.assertEqual(self.expected_predictions, estimator_spec.predictions)
+
+
+class EvalSpecTest(test_util.TensorFlowTestCase):
+
+ def create_estimator_spec(self, loss, metrics):
+ return model_fn_lib.EstimatorSpec(
+ mode=model_fn_lib.ModeKeys.EVAL, loss=loss, eval_metric_ops=metrics)
+
+ def create_constant_loss(self, loss_value):
+ return constant_op.constant(loss_value, dtype=dtypes.float64)
+
+ def create_eval_metrics(self, noise):
+ predictions = np.array([0.1, 0.2, 0.3, 0.6 + noise])
+ labels = np.array([0.1, 0.2, 0.3, 0.6])
+
+ metrics = {
+ 'accuracy': metrics_lib.accuracy(labels, predictions),
+ 'auc': metrics_lib.auc(labels, predictions)
+ }
+ return metrics
+
+ def test_example(self):
+ with self.test_session() as session:
+ tower_losses = map(self.create_constant_loss, [2, 4, 6])
+ tower_metrics = map(self.create_eval_metrics, [0, 0.2, 0.3])
+ tower_specs = [
+ self.create_estimator_spec(l, m)
+ for l, m in zip(tower_losses, tower_metrics)
+ ]
+ session.run(variables.local_variables_initializer())
+
+ estimator_spec = replicate_model_fn._eval_spec(
+ tower_specs, aggregation_device='/device:GPU:0')
+
+ accuracy, a = estimator_spec.eval_metric_ops['accuracy']
+ auc, b = estimator_spec.eval_metric_ops['auc']
+
+ self.assertEqual('/device:CPU:0', accuracy.device)
+ self.assertEqual('/device:CPU:0', auc.device)
+
+ session.run([a, b])
+ accuracy = session.run(accuracy)
+ auc = session.run(auc)
+
+ self.assertNear((12 - 2) / 12, accuracy, 0.01)
+ self.assertEqual(0, auc)
+ self.assertEqual(2 + 4 + 6, session.run(estimator_spec.loss))
+
+ def test_handles_single_tower(self):
+ with self.test_session() as session:
+ tower_losses = map(self.create_constant_loss, [5])
+ tower_metrics = map(self.create_eval_metrics, [0.2])
+ tower_specs = [
+ self.create_estimator_spec(l, m)
+ for l, m in zip(tower_losses, tower_metrics)
+ ]
+ session.run(variables.local_variables_initializer())
+
+ estimator_spec = replicate_model_fn._eval_spec(
+ tower_specs, aggregation_device='/device:GPU:0')
+
+ accuracy, a = estimator_spec.eval_metric_ops['accuracy']
+ auc, b = estimator_spec.eval_metric_ops['auc']
+
+ self.assertEqual('/device:CPU:0', accuracy.device)
+ self.assertEqual('/device:CPU:0', auc.device)
+
+ session.run([a, b])
+ accuracy = session.run(accuracy)
+ auc = session.run(auc)
+
+ self.assertNear((4 - 1) / 4, accuracy, 0.01)
+ self.assertEqual(0, auc)
+ self.assertEqual(5, session.run(estimator_spec.loss))
+
+
+class PredictSpecTest(test_util.TensorFlowTestCase):
+
+ def model_fn(self, mode, features, labels, params):
+ c = variable_scope.get_variable(
+ 'c',
+ initializer=constant_op.constant(0.25, dtype=dtypes.float64),
+ dtype=dtypes.float64)
+
+ predictions = math_ops.add(np.array([features[0], features[0]]), c)
+
+ return model_fn_lib.EstimatorSpec(
+ mode=model_fn_lib.ModeKeys.PREDICT,
+ predictions={
+ 'probabilities': predictions
+ })
+
+ def test_example(self):
+ with self.test_session() as session:
+ tower_specs = replicate_model_fn._get_loss_towers(
+ self.model_fn,
+ mode=None,
+ features=[[0.1], [0.2]],
+ labels=[[], []],
+ params=None,
+ config=None,
+ devices=['/gpu:0', '/gpu:1'],
+ local_ps_device='/gpu:0',
+ )
+ session.run(variables.global_variables_initializer())
+
+ estimator_spec = replicate_model_fn._predict_spec(
+ tower_specs, aggregation_device='/gpu:0')
+
+ self.assertEqual('/device:GPU:0',
+ estimator_spec.predictions['probabilities'].device)
+ self.assertAllClose({
+ 'probabilities': np.array([0.35, 0.35, 0.45, 0.45])
+ }, session.run(estimator_spec.predictions))
+
+
+class ReduceMetricVariablesTest(test_util.TensorFlowTestCase):
+
+ def create_metric_variable(self, initial_value, name):
+ return variable_scope.variable(
+ initial_value,
+ trainable=False,
+ collections=[ops_lib.GraphKeys.METRIC_VARIABLES],
+ validate_shape=True,
+ name=name)
+
+ def create_tower_metrics(self, tower_id):
+ with variable_scope.variable_scope('', reuse=(tower_id != 0)):
+ self.create_metric_variable(1.3 * (tower_id + 1), 'total')
+ self.create_metric_variable(2.3 * (tower_id + 1), 'count')
+ self.create_metric_variable(
+ np.array([3.3, 3.5, 3.7]) * (tower_id + 1), 'total')
+
+ def test_example(self):
+ with self.test_session() as session:
+ for tower_id in range(3):
+ self.create_tower_metrics(tower_id)
+
+ session.run(
+ variables.variables_initializer(
+ ops_lib.get_collection(ops_lib.GraphKeys.METRIC_VARIABLES)))
+
+ session.run(
+ replicate_model_fn._reduce_metric_variables(number_of_towers=3))
+
+ # 1st tower = 1.3, 2.3, [3.3, 3.5, 3.7]
+ # 2nd tower = 2.6, 4.6, [6.6, 7.0, 7.4]
+ # 3rd tower = 3.9, 6.9, [9.9, 10.5, 11.1]
+ # Reduced = 7.8, 13.8, [19.8, 21.0, 22.2]
+ # Towers are accumulated in the first tower.
+ local_metrics = session.run(
+ ops_lib.get_collection(ops_lib.GraphKeys.METRIC_VARIABLES))
+
+ self.assertNear(7.8, local_metrics[0], 0.01)
+ self.assertNear(13.8, local_metrics[1], 0.01)
+ self.assertAllClose([19.8, 21., 22.2], local_metrics[2], 0.01)
+ self.assertNear(0.0, local_metrics[3], 0.01)
+ self.assertNear(0.0, local_metrics[4], 0.01)
+ self.assertAllClose([0.0, 0.0, 0.0], local_metrics[5], 0.01)
+ self.assertNear(0.0, local_metrics[6], 0.01)
+ self.assertNear(0.0, local_metrics[7], 0.01)
+ self.assertAllClose([0.0, 0.0, 0.0], local_metrics[8], 0.01)
+
+ def test_reduce_is_idempotent(self):
+ with self.test_session() as session:
+ for tower_id in range(3):
+ self.create_tower_metrics(tower_id)
+
+ session.run(
+ variables.variables_initializer(
+ ops_lib.get_collection(ops_lib.GraphKeys.METRIC_VARIABLES)))
+
+ for _ in range(20):
+ session.run(
+ replicate_model_fn._reduce_metric_variables(number_of_towers=3))
+
+ local_metrics = session.run(
+ ops_lib.get_collection(ops_lib.GraphKeys.METRIC_VARIABLES))
+
+ self.assertNear(7.8, local_metrics[0], 0.01)
+ self.assertNear(13.8, local_metrics[1], 0.01)
+ self.assertAllClose([19.8, 21., 22.2], local_metrics[2], 0.01)
+ self.assertNear(0.0, local_metrics[3], 0.01)
+ self.assertNear(0.0, local_metrics[4], 0.01)
+ self.assertAllClose([0.0, 0.0, 0.0], local_metrics[5], 0.01)
+ self.assertNear(0.0, local_metrics[6], 0.01)
+ self.assertNear(0.0, local_metrics[7], 0.01)
+ self.assertAllClose([0.0, 0.0, 0.0], local_metrics[8], 0.01)
+
+ def test_handles_single_tower(self):
+ with self.test_session() as session:
+ self.create_tower_metrics(0)
+ session.run(
+ variables.variables_initializer(
+ ops_lib.get_collection(ops_lib.GraphKeys.METRIC_VARIABLES)))
+
+ session.run(
+ replicate_model_fn._reduce_metric_variables(number_of_towers=1))
+
+ local_metrics = session.run(
+ ops_lib.get_collection(ops_lib.GraphKeys.METRIC_VARIABLES))
+
+ self.assertNear(1.3, local_metrics[0], 0.01)
+ self.assertNear(2.3, local_metrics[1], 0.01)
+ self.assertAllClose([3.3, 3.5, 3.7], local_metrics[2], 0.01)
+
+ def test_doesnt_accept_uneven_number_of_variables(self):
+ with self.test_session() as session:
+ for tower_id in range(3):
+ self.create_tower_metrics(tower_id)
+ self.create_metric_variable(-1.0, 'oddball')
+
+ session.run(
+ variables.variables_initializer(
+ ops_lib.get_collection(ops_lib.GraphKeys.METRIC_VARIABLES)))
+
+ with self.assertRaisesRegexp(ValueError, ''):
+ session.run(
+ replicate_model_fn._reduce_metric_variables(number_of_towers=3))
+
+
+class MergeExportOutputsTest(test_util.TensorFlowTestCase):
+
+ def optimizer_fn(self):
+ return gradient_descent.GradientDescentOptimizer(1.0)
+
+ def model_fn(self, mode, features, labels, params):
+ c = variable_scope.get_variable(
+ 'c',
+ initializer=constant_op.constant(10, dtype=dtypes.float64),
+ dtype=dtypes.float64)
+
+ predictions = {'probabilities': math_ops.multiply(features, c)}
+ loss = losses.absolute_difference(
+ labels=labels,
+ predictions=predictions['probabilities'],
+ reduction=losses.Reduction.SUM)
+
+ metrics = {
+ 'accuracy': metrics_lib.accuracy(labels, predictions['probabilities']),
+ 'auc': metrics_lib.auc(labels, predictions['probabilities'])
+ }
+ tensor_string_repr = str(features)
+ classes = constant_op.constant(
+ re.search('(split_inputs/split:[0-9])', tensor_string_repr).group(1),
+ dtype=dtypes.string)
+
+ export_outputs = {
+ signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
+ export_output.PredictOutput(predictions),
+ 'classification_output':
+ export_output.ClassificationOutput(predictions['probabilities'],
+ classes),
+ 'classification_scores':
+ export_output.ClassificationOutput(
+ scores=predictions['probabilities']),
+ 'classification_classes':
+ export_output.ClassificationOutput(classes=classes),
+ 'regression_output':
+ export_output.RegressionOutput(predictions['probabilities']),
+ }
+
+ return model_fn_lib.EstimatorSpec(
+ mode=mode,
+ loss=math_ops.reduce_sum(loss),
+ eval_metric_ops=metrics,
+ predictions=predictions,
+ train_op=loss, # This train_op isn't actually used.
+ export_outputs=export_outputs)
+
+ def replicate_estimator_spec(self, session):
+ features = np.array([0.01, 0.002])
+ labels = np.array([0.01, 0.02])
+
+ replicated_model_fn = replicate_model_fn.replicate_model_fn(
+ self.model_fn, self.optimizer_fn, devices=['/gpu:0', '/gpu:1'])
+ estimator_spec = replicated_model_fn(model_fn_lib.ModeKeys.PREDICT,
+ features, labels, {})
+ session.run(variables.global_variables_initializer())
+ return estimator_spec
+
+ def test_merge_predict_output(self):
+ with self.test_session() as session:
+ estimator_spec = self.replicate_estimator_spec(session)
+ self.assertAllClose(
+ {
+ 'probabilities': np.array([0.1, 0.02])
+ },
+ session.run(estimator_spec.export_outputs[
+ signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY].outputs))
+
+ def test_merge_classification_output_scores_classes(self):
+ with self.test_session() as session:
+ estimator_spec = self.replicate_estimator_spec(session)
+ self.assertAllClose(
+ [0.1, 0.02],
+ session.run(
+ estimator_spec.export_outputs['classification_output'].scores))
+ self.assertAllEqual(
+ [b'split_inputs/split:0', b'split_inputs/split:1'],
+ session.run(
+ estimator_spec.export_outputs['classification_output'].classes))
+
+ def test_merge_classification_output_scores(self):
+ with self.test_session() as session:
+ estimator_spec = self.replicate_estimator_spec(session)
+ self.assertAllClose(
+ [0.1, 0.02],
+ session.run(
+ estimator_spec.export_outputs['classification_scores'].scores))
+ self.assertEqual(
+ None, estimator_spec.export_outputs['classification_scores'].classes)
+
+ def test_merge_classification_output_classes(self):
+ with self.test_session() as session:
+ estimator_spec = self.replicate_estimator_spec(session)
+ self.assertAllEqual(
+ [b'split_inputs/split:0', b'split_inputs/split:1'],
+ session.run(
+ estimator_spec.export_outputs['classification_classes'].classes))
+ self.assertEqual(
+ None, estimator_spec.export_outputs['classification_classes'].scores)
+
+ def test_merge_regression_output(self):
+ with self.test_session() as session:
+ estimator_spec = self.replicate_estimator_spec(session)
+ self.assertAllClose(
+ [0.1, 0.02],
+ session.run(estimator_spec.export_outputs['regression_output'].value))
+
+
+class GetLocalDevicesTest(test_util.TensorFlowTestCase):
+
+ def test_there_is_at_least_a_cpu(self):
+ self.assertTrue(replicate_model_fn._get_local_devices('CPU'))
+
+ def test_there_is_no_xpu(self):
+ self.assertFalse(
+ replicate_model_fn._get_local_devices('XPU')) # XPU doesn't exist.
+
+ def test_whether_there_is_a_gpu(self):
+ self.assertEqual(
+ len(replicate_model_fn._get_local_devices('GPU')),
+ test.is_gpu_available())
+
+
+class LocalDeviceSetterTest(test_util.TensorFlowTestCase):
+
+ def test_vars_are_on_ps_but_ops_are_on_workers(self):
+ local_device_setter = replicate_model_fn._local_device_setter(
+ ps_device='/device:GPU:3', worker_device='/device:GPU:2')
+
+ with ops_lib.device(local_device_setter):
+ c = variables.Variable(0.01)
+ self.assertEqual('/device:GPU:3', c.device)
+
+ cc = variables.Variable(0.02)
+ self.assertEqual('/device:GPU:3', cc.device)
+
+ ccc = variables.Variable(0.03)
+ self.assertEqual('/device:GPU:3', ccc.device)
+
+ c_op = array_ops.concat(c, axis=0)
+ self.assertEqual('/device:GPU:2', c_op.device)
+
+ cc_op = array_ops.concat(cc, axis=0)
+ self.assertEqual('/device:GPU:2', cc_op.device)
+
+
+class ComputeSumWithDevicePlacementTest(test_util.TensorFlowTestCase):
+
+ def test_example(self):
+ with self.test_session() as session:
+ total = replicate_model_fn._compute_sum_on_device(
+ [1.0, 2.0, 3.0, 4.0], device='/device:GPU:0', name='test_sum')
+
+ self.assertEqual('/device:GPU:0', total.device)
+ self.assertEqual('test_sum', total.op.name)
+ self.assertEqual(10.0, session.run(total))
+
+
+class ConcatTensorDictsTest(test_util.TensorFlowTestCase):
+
+ def test_example(self):
+ tensor_dicts = [
+ {
+ 'a': np.array([1.0, 2.0]),
+ 'b': np.array([11.0]),
+ 'c': np.array([21.0]),
+ },
+ {
+ 'a': np.array([3.0]),
+ 'b': np.array([12.0, 13.0]),
+ },
+ {
+ 'b': np.array([14.0]),
+ },
+ ]
+
+ with self.test_session() as session:
+ self.assertAllClose({
+ 'a': np.array([1.0, 2.0, 3.0]),
+ 'b': np.array([11.0, 12.0, 13.0, 14.0]),
+ 'c': np.array([21.0]),
+ }, session.run(replicate_model_fn._concat_tensor_dicts(*tensor_dicts)))
+
+
+if __name__ == '__main__':
+ test.main()
diff --git a/tensorflow/contrib/framework/BUILD b/tensorflow/contrib/framework/BUILD
index 891425fd8c..e8dad886a1 100644
--- a/tensorflow/contrib/framework/BUILD
+++ b/tensorflow/contrib/framework/BUILD
@@ -24,6 +24,7 @@ tf_custom_op_py_library(
"python/framework/__init__.py",
"python/framework/checkpoint_utils.py",
"python/framework/experimental.py",
+ "python/framework/graph_util.py",
"python/framework/tensor_util.py",
"python/ops/__init__.py",
"python/ops/accumulate_n_v2.py",
@@ -32,6 +33,7 @@ tf_custom_op_py_library(
"python/ops/checkpoint_ops.py",
"python/ops/ops.py",
"python/ops/prettyprint_ops.py",
+ "python/ops/sort_ops.py",
"python/ops/variables.py",
],
dso = [
@@ -232,6 +234,17 @@ py_test(
)
py_test(
+ name = "graph_util_test",
+ srcs = ["python/framework/graph_util_test.py"],
+ srcs_version = "PY2AND3",
+ deps = [
+ ":framework_py",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:platform",
+ ],
+)
+
+py_test(
name = "tensor_util_test",
srcs = ["python/framework/tensor_util_test.py"],
srcs_version = "PY2AND3",
@@ -307,6 +320,20 @@ py_test(
],
)
+py_test(
+ name = "sort_ops_test",
+ size = "medium",
+ srcs = ["python/ops/sort_ops_test.py"],
+ srcs_version = "PY2AND3",
+ deps = [
+ ":framework_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:random_ops",
+ "//third_party/py/numpy",
+ ],
+)
+
filegroup(
name = "all_files",
srcs = glob(
diff --git a/tensorflow/contrib/framework/__init__.py b/tensorflow/contrib/framework/__init__.py
index 8421ba7c04..3f59261183 100644
--- a/tensorflow/contrib/framework/__init__.py
+++ b/tensorflow/contrib/framework/__init__.py
@@ -79,6 +79,8 @@ See the @{$python/contrib.framework} guide.
@@load_embedding_initializer
@@load_linear_multiclass_bias_initializer
@@load_variable_slot_initializer
+
+@@sort
"""
from __future__ import absolute_import
diff --git a/tensorflow/contrib/framework/python/framework/__init__.py b/tensorflow/contrib/framework/python/framework/__init__.py
index c8e6a46854..2d49771ab7 100644
--- a/tensorflow/contrib/framework/python/framework/__init__.py
+++ b/tensorflow/contrib/framework/python/framework/__init__.py
@@ -21,6 +21,7 @@ from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.framework.python.framework.checkpoint_utils import *
from tensorflow.contrib.framework.python.framework.experimental import experimental
+from tensorflow.contrib.framework.python.framework.graph_util import *
from tensorflow.contrib.framework.python.framework.tensor_util import *
# pylint: enable=wildcard-import
from tensorflow.python.util import decorator_utils
diff --git a/tensorflow/contrib/framework/python/framework/graph_util.py b/tensorflow/contrib/framework/python/framework/graph_util.py
new file mode 100644
index 0000000000..8ab8711db4
--- /dev/null
+++ b/tensorflow/contrib/framework/python/framework/graph_util.py
@@ -0,0 +1,128 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Helpers to manipulate a tensor graph in python.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+import copy
+import six
+
+# pylint: disable=unused-import
+from tensorflow.core.framework import graph_pb2
+from tensorflow.core.framework import node_def_pb2
+from tensorflow.python.framework.graph_util_impl import _assert_nodes_are_present
+from tensorflow.python.framework.graph_util_impl import _bfs_for_reachable_nodes
+from tensorflow.python.framework.graph_util_impl import _extract_graph_summary
+from tensorflow.python.framework.graph_util_impl import _node_name
+
+__all__ = ["fuse_op"]
+
+
+def fuse_op(graph_def, input_nodes, output_nodes, output_dtypes,
+ output_quantized, op_name, op_type):
+ """Fuse subgraph between input_nodes and output_nodes into a single custom op.
+
+ Args:
+ graph_def: A graph_pb2.GraphDef proto.
+ input_nodes: input nodes to the subgraph to be fused.
+ output_nodes: output nodes to the subgraph to be fused.
+ output_dtypes: A list of output datatypes for the custom op
+ output_quantized: A boolean flag that indicates if output is quantized
+ op_name: fused op name.
+ op_type: fused op type.
+ Returns:
+ The GraphDef of the new graph.
+
+ Raises:
+ TypeError: If 'graph_def' is not a graph_pb2.GraphDef proto.
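+
+ For example, given a chain A -> B -> C -> D -> E, calling
+ `fuse_op(graph_def, ['A'], ['D'], [types_pb2.DT_FLOAT], True, 'FusedOp',
+ 'Op2')` replaces B and C with a single node 'FusedOp' of type 'Op2' that
+ consumes A and feeds D (see graph_util_test.py).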
+ """
+
+ if not isinstance(graph_def, graph_pb2.GraphDef):
+ raise TypeError("graph_def must be a graph_pb2.GraphDef proto.")
+
+ if isinstance(input_nodes, six.string_types):
+ raise TypeError("input_nodes must be a list.")
+
+ if isinstance(output_nodes, six.string_types):
+ raise TypeError("output_nodes must be a list.")
+
+ name_to_input_name, name_to_node, name_to_seq_num = _extract_graph_summary(
+ graph_def)
+ _assert_nodes_are_present(name_to_node, input_nodes + output_nodes)
+
+ # Nodes up to and including input_nodes
+ reachable_by_input = _bfs_for_reachable_nodes(input_nodes, name_to_input_name)
+ # Nodes up to and including output_nodes
+ reachable_by_output = _bfs_for_reachable_nodes(output_nodes,
+ name_to_input_name)
+
+ # Set of nodes in the list input_nodes
+ input_nodes_set = set(input_nodes)
+
+ # Set of nodes in the list output_nodes
+ output_nodes_set = set(output_nodes)
+
+ nodes_post_output = []
+ for node in graph_def.node:
+ n = _node_name(node.name)
+ if n in reachable_by_output:
+ if n not in reachable_by_input and n not in output_nodes_set:
+ # n is between input and output, i.e., part of the fused op
+ next_to_visit = [n]
+ while next_to_visit:
+ cur_node = next_to_visit[0]
+ del next_to_visit[0]
+ if cur_node in reachable_by_input and cur_node not in input_nodes_set:
+ raise TypeError("Node %s uses input %s not in input_nodes." %
+ (n, cur_node))
+ if cur_node not in input_nodes_set:
+ next_to_visit += name_to_input_name[cur_node]
+ else:
+ nodes_post_output.append(n)
+
+ # Add all nodes up to the input nodes
+ out = graph_pb2.GraphDef()
+ reachable_by_input_sorted = sorted(
+ list(reachable_by_input), key=lambda n: name_to_seq_num[n])
+ for node in reachable_by_input_sorted:
+ out.node.extend([copy.deepcopy(name_to_node[node])])
+
+ # Add the custom op
+ new_node = node_def_pb2.NodeDef()
+ for node in input_nodes:
+ new_node.input.append(node)
+ new_node.attr["_output_types"].list.type[:] = output_dtypes
+ new_node.attr["_output_quantized"].b = output_quantized
+ new_node.op = op_type
+ new_node.name = op_name
+ out.node.extend([new_node])
+
+ # Add the nodes in the output of the custom op
+ for index, n in enumerate(output_nodes):
+ assert len(name_to_node[n].input) == 1
+ new_node = copy.deepcopy(name_to_node[n])
+ del new_node.input[:]
+ new_node.input.append(op_name + (":" + str(index) if index != 0 else ""))
+ out.node.extend([new_node])
+
+ # Add the nodes post output_nodes
+ for n in nodes_post_output:
+ out.node.extend([copy.deepcopy(name_to_node[n])])
+
+ out.library.CopyFrom(graph_def.library)
+ out.versions.CopyFrom(graph_def.versions)
+ return out
diff --git a/tensorflow/contrib/framework/python/framework/graph_util_test.py b/tensorflow/contrib/framework/python/framework/graph_util_test.py
new file mode 100644
index 0000000000..87b992e22e
--- /dev/null
+++ b/tensorflow/contrib/framework/python/framework/graph_util_test.py
@@ -0,0 +1,61 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""@graph_util tests."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.contrib.framework.python.framework import graph_util
+from tensorflow.core.framework import graph_pb2
+from tensorflow.core.framework import node_def_pb2
+from tensorflow.core.framework import types_pb2
+from tensorflow.python.platform import test
+
+
+def GetNewNode(name, op, input_nodes):
+ new_node = node_def_pb2.NodeDef()
+ new_node.op = op
+ new_node.name = name
+ for node in input_nodes:
+ new_node.input.append(node)
+ return new_node
+
+
+class GraphUtilTest(test.TestCase):
+
+ def testGraphUtil(self):
+ graph_def = graph_pb2.GraphDef()
+ node_a = GetNewNode('A', 'Placeholder', [])
+ node_b = GetNewNode('B', 'Op1', ['A'])
+ node_c = GetNewNode('C', 'Op1', ['B'])
+ node_d = GetNewNode('D', 'Op1', ['C'])
+ node_e = GetNewNode('E', 'Op1', ['D'])
+ graph_def.node.extend([node_a, node_b, node_c, node_d, node_e])
+ fused_graph_def = graph_util.fuse_op(
+ graph_def, ['A'], ['D'], [types_pb2.DT_FLOAT], True, 'FusedOp', 'Op2')
+ self.assertEqual(len(fused_graph_def.node), 4)
+ self.assertEqual(fused_graph_def.node[0].name, 'A')
+ self.assertEqual(fused_graph_def.node[1].name, 'FusedOp')
+ self.assertEqual(fused_graph_def.node[1].input[0], 'A')
+ self.assertEqual(fused_graph_def.node[1].op, 'Op2')
+ self.assertEqual(fused_graph_def.node[1].attr['_output_quantized'].b, True)
+ self.assertEqual(fused_graph_def.node[1].attr['_output_types'].list.type,
+ [types_pb2.DT_FLOAT])
+ self.assertEqual(fused_graph_def.node[2].name, 'D')
+ self.assertEqual(fused_graph_def.node[3].name, 'E')
+
+
+if __name__ == '__main__':
+ test.main()
diff --git a/tensorflow/contrib/framework/python/ops/__init__.py b/tensorflow/contrib/framework/python/ops/__init__.py
index edef37cf0c..685bb94779 100644
--- a/tensorflow/contrib/framework/python/ops/__init__.py
+++ b/tensorflow/contrib/framework/python/ops/__init__.py
@@ -24,5 +24,6 @@ from tensorflow.contrib.framework.python.ops.arg_scope import *
from tensorflow.contrib.framework.python.ops.checkpoint_ops import *
from tensorflow.contrib.framework.python.ops.ops import *
from tensorflow.contrib.framework.python.ops.prettyprint_ops import *
+from tensorflow.contrib.framework.python.ops.sort_ops import *
from tensorflow.contrib.framework.python.ops.variables import *
# pylint: enable=wildcard-import
diff --git a/tensorflow/contrib/framework/python/ops/sort_ops.py b/tensorflow/contrib/framework/python/ops/sort_ops.py
new file mode 100644
index 0000000000..8f62f0ea7b
--- /dev/null
+++ b/tensorflow/contrib/framework/python/ops/sort_ops.py
@@ -0,0 +1,113 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Support for sorting tensors.
+
+@@sort
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.python.framework import ops as framework_ops
+from tensorflow.python.framework import tensor_util
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import nn_ops
+
+
+def sort(values, axis=-1, direction='ASCENDING', name=None):
+ """Sorts a tensor.
+
+ Args:
+ values: 1-D or higher numeric `Tensor`.
+ axis: The axis along which to sort. The default is -1, which sorts the last
+ axis.
+ direction: The direction in which to sort the values (`'ASCENDING'` or
+ `'DESCENDING'`).
+ name: Optional name for the operation.
+
+ Returns:
+ A `Tensor` with the same dtype and shape as `values`, with the elements
+ sorted along the given `axis`.
+
+ Raises:
+ ValueError: If axis is not a constant scalar, or the direction is invalid.
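+
+ For example, `sort([3., 1., 2.])` evaluates to `[1., 2., 3.]`, and
+ `sort([[3., 1., 2.]], direction='DESCENDING')` to `[[3., 2., 1.]]`.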
+ """
+ with framework_ops.name_scope(name, 'sort'):
+ if direction not in _SORT_IMPL:
+ raise ValueError('%s should be one of %s' %
+ (direction, ', '.join(sorted(_SORT_IMPL.keys()))))
+ # Axis must be statically known (a constant scalar), not a dynamic Tensor.
+ axis = framework_ops.convert_to_tensor(axis, name='axis')
+ axis_static = tensor_util.constant_value(axis)
+ if axis.shape.ndims != 0 or axis_static is None:
+ raise ValueError('axis must be a constant scalar')
+ axis_static = int(axis_static) # Avoids NumPy casting error
+
+ values = framework_ops.convert_to_tensor(values, name='values')
+
+ return _SORT_IMPL[direction](values, axis_static)
+
+
+def _descending_sort(values, axis):
+ """Sorts values in reverse using `top_k`.
+
+ Args:
+ values: Tensor of numeric values.
+ axis: Index of the axis which values should be sorted along.
+
+ Returns:
+ The sorted values.
+ """
+ k = array_ops.shape(values)[axis]
+ rank = array_ops.rank(values)
+ # Fast path: sorting the last axis.
+ if axis == -1 or axis + 1 == values.get_shape().ndims:
+ return nn_ops.top_k(values, k)[0]
+
+ # Otherwise, transpose the array. Swap axes `axis` and `rank - 1`.
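+ # For example, for a rank-4 tensor and axis=1, the permutation built below
+ # is [0, 3, 2, 1].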
+ if axis < 0:
+ # Convert a negative axis to its non-negative equivalent; note that this
+ # makes `axis` a Tensor, since `rank` is one.
+ axis += rank
+ transposition = array_ops.concat(
+ [
+ # Axes up to axis are unchanged.
+ math_ops.range(axis),
+ # Swap axis and rank - 1.
+ [rank - 1],
+ # Axes in [axis + 1, rank - 1) are unchanged.
+ math_ops.range(axis + 1, rank - 1),
+ # Swap axis and rank - 1.
+ [axis]
+ ],
+ axis=0)
+ top_k_input = array_ops.transpose(values, transposition)
+ values, unused_indices = nn_ops.top_k(top_k_input, k)
+ # transposition contains a single cycle of length 2 (swapping 2 elements),
+ # so it is an involution (it is its own inverse).
+ return array_ops.transpose(values, transposition)
+
+
+def _ascending_sort(values, axis):
+ # Negate the values to obtain ascending order from a descending sort.
+ return -_descending_sort(-values, axis)
+
+
+_SORT_IMPL = {
+ 'ASCENDING': _ascending_sort,
+ 'DESCENDING': _descending_sort,
+}
diff --git a/tensorflow/contrib/framework/python/ops/sort_ops_test.py b/tensorflow/contrib/framework/python/ops/sort_ops_test.py
new file mode 100644
index 0000000000..d08ae502f1
--- /dev/null
+++ b/tensorflow/contrib/framework/python/ops/sort_ops_test.py
@@ -0,0 +1,95 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for the sort wrapper."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+
+from tensorflow.contrib.framework.python.ops import sort_ops
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import errors
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import random_ops
+from tensorflow.python.platform import test
+
+
+class SortTest(test.TestCase):
+
+ def testRandom_lowDimensionality(self):
+ self._testRandom_lowDimensionality(negative_axis=False)
+
+ def testRandom_lowDimensionality_negative(self):
+ self._testRandom_lowDimensionality(negative_axis=True)
+
+ def _testRandom_lowDimensionality(self, negative_axis):
+ np.random.seed(42)
+ for _ in range(20):
+ rank = np.random.randint(1, 3)
+ shape = [np.random.randint(0, 20) for _ in range(rank)]
+ arr = np.random.random(shape)
+ sort_axis = np.random.choice(rank)
+ if negative_axis:
+ sort_axis = -1 - sort_axis
+ with self.test_session():
+ self.assertAllEqual(
+ np.sort(arr, axis=sort_axis),
+ sort_ops.sort(constant_op.constant(arr), axis=sort_axis).eval())
+
+ def testRandom_highDimensionality(self):
+ np.random.seed(100)
+ for _ in range(20):
+ rank = np.random.randint(5, 15)
+ shape = [np.random.randint(1, 4) for _ in range(rank)]
+ arr = np.random.random(shape)
+ sort_axis = np.random.choice(rank)
+ with self.test_session():
+ self.assertAllEqual(
+ np.sort(arr, axis=sort_axis),
+ sort_ops.sort(constant_op.constant(arr), axis=sort_axis).eval())
+
+ def testScalar(self):
+ # Create a scalar tensor whose static shape is unknown at graph time.
+ zeros_length_1 = array_ops.zeros(
+ random_ops.random_uniform([1], minval=0, maxval=1, dtype=dtypes.int32),
+ dtype=dtypes.int32)
+ scalar = array_ops.zeros(zeros_length_1)
+
+ sort = sort_ops.sort(scalar)
+ with self.test_session():
+ with self.assertRaises(errors.InvalidArgumentError):
+ sort.eval()
+
+ def testNegativeOutOfBounds_staticShape(self):
+ arr = constant_op.constant([3, 4, 5])
+ with self.assertRaises(ValueError):
+ sort_ops.sort(arr, axis=-4)
+
+ def testDescending(self):
+ arr = np.random.random((10, 5, 5))
+ with self.test_session():
+ self.assertAllEqual(
+ np.sort(arr, axis=0)[::-1],
+ sort_ops.sort(
+ constant_op.constant(arr),
+ axis=0,
+ direction='DESCENDING').eval())
+
+
+if __name__ == '__main__':
+ test.main()
diff --git a/tensorflow/contrib/summary/summary_ops.py b/tensorflow/contrib/summary/summary_ops.py
index ecfa6baeff..56e3198593 100644
--- a/tensorflow/contrib/summary/summary_ops.py
+++ b/tensorflow/contrib/summary/summary_ops.py
@@ -246,8 +246,8 @@ def image(name, tensor, bad_color=None, max_images=3, family=None):
"""Writes an image summary if possible."""
def function(tag, scope):
- if bad_color is None:
- bad_color_ = constant_op.constant([255, 0, 0, 255], dtype=dtypes.uint8)
+ bad_color_ = (constant_op.constant([255, 0, 0, 255], dtype=dtypes.uint8)
+ if bad_color is None else bad_color)
# Note the identity to move the tensor to the CPU.
return gen_summary_ops.write_image_summary(
context.context().summary_writer_resource,
diff --git a/tensorflow/contrib/tpu/profiler/BUILD b/tensorflow/contrib/tpu/profiler/BUILD
index f6309e2e72..0e1fca3d3c 100644
--- a/tensorflow/contrib/tpu/profiler/BUILD
+++ b/tensorflow/contrib/tpu/profiler/BUILD
@@ -95,3 +95,10 @@ tf_proto_library_cc(
cc_api_version = 2,
visibility = ["//visibility:public"],
)
+
+tf_proto_library_cc(
+ name = "tf_op_stats_proto",
+ srcs = ["tf_op_stats.proto"],
+ cc_api_version = 2,
+ visibility = ["//visibility:public"],
+)
diff --git a/tensorflow/contrib/tpu/profiler/tf_op_stats.proto b/tensorflow/contrib/tpu/profiler/tf_op_stats.proto
new file mode 100644
index 0000000000..5b2dbb3124
--- /dev/null
+++ b/tensorflow/contrib/tpu/profiler/tf_op_stats.proto
@@ -0,0 +1,127 @@
+// This proto describes the format of TensorFlow operation-level stats used
+// for profiling (in TensorBoard).
+
+syntax = "proto2";
+
+package tensorflow.tpu;
+
+// Result proto for OpMetrics.
+message OpMetricsResult {
+ // True if this OP is executed on the device; False if it is executed on the
+ // host.
+ optional bool on_device = 1;
+ reserved 2; // was uint32 id.
+ // Name of this OP.
+ optional string name = 3;
+ // Rank of this OP.
+ optional uint64 rank = 4;
+ // The starting time in cycles of the last instance of this OP executed.
+ optional double last_starttime_in_cycles = 5;
+ // The ending time in cycles of the last instance of this OP executed.
+ optional double last_endtime_in_cycles = 6;
+  // If this OP (say A) is an immediate child of another OP (say B), this field
+ // stores the sum of duration in microseconds of A inside B. If A appears more
+ // than once in B, the duration of all A's appearances will be added together.
+ // This sum will be reset after the self-time of B is calculated so that it
+ // can be reused for a new parent OP.
+ optional double sum_of_duration_in_us_as_children = 7;
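+  // (Illustrative example: if A appears twice inside B, taking 2us and 3us,
+  // this field holds 5us for A until B's self-time has been computed.)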
+  // Number of times this OP occurred.
+ optional uint64 occurrences = 8;
+ // Total time in microseconds spent in this OP (accumulated
+ // over all of its occurrences).
+ optional double total_time_in_us = 9;
+ // Total self time in microseconds spent in this OP
+ // (accumulated over all of its occurrences).
+ optional double total_self_time_in_us = 10;
+  // The total self time as a fraction of the sum of all OPs'
+  // total self time on the host.
+ optional double host_total_self_time_as_fraction_of_all_op_time = 11;
+  // Cumulative total self time as a fraction on the host.
+ optional double host_cumulative_total_self_time_as_fraction_of_all_op_time =
+ 12;
+  // The total self time as a fraction of the sum of all OPs'
+  // total self time on the device.
+ optional double device_total_self_time_as_fraction_of_all_op_time = 13;
+  // Cumulative total self time as a fraction on the device.
+ optional double device_cumulative_total_self_time_as_fraction_of_all_op_time =
+ 14;
+ // Total number of FLOPs incurred by this OP.
+ optional double total_flops = 15;
+  // Total number of bytes accessed by this OP.
+ optional double total_bytes_accessed = 16;
+ // Total time in microseconds that the MXU is occupied by this OP.
+ optional double mxu_occupancy_in_us = 17;
+ // Total time in microseconds that the XU is occupied by this OP.
+ optional double xu_occupancy_in_us = 18;
+ // Total DMA access stall time in microseconds.
+ optional double total_dma_stall_in_us = 19;
+}
+
+// Result proto for OpMetricsDb.
+message OpMetricsDbResult {
+  // A collection of OpMetricsResult protos.
+ repeated OpMetricsResult metrics_db = 1;
+}
+
+// Result proto for StepInfo.
+message StepInfoResult {
+ // The (micro) step number.
+ optional uint32 step_num = 1;
+ // The step duration in picoseconds.
+ optional uint64 duration_ps = 2;
+ // The infeed duration in picoseconds.
+ // Can turn into a map if we want a variable number of ops.
+ optional uint64 infeed_duration_ps = 3;
+}
+
+// Result proto for a sequence of steps.
+message StepSequenceResult {
+ // A sequence of StepInfoResults.
+ repeated StepInfoResult step_sequence = 1;
+}
+
+// Result proto for a StepDatabase.
+message StepDatabaseResult {
+ // A map from core_id to StepSequenceResult.
+ map<uint32, StepSequenceResult> step_sequence_per_core = 1;
+}
+
+// Result proto for Dashboard data.
+message DashboardResult {
+ // The total iteration time in nanoseconds.
+ optional double iteration_time_ns = 1;
+ // The total number of iterations.
+ optional int32 num_iterations = 2;
+ // The total computation time in nanoseconds.
+ optional double computation_time_ns = 3;
+ // The total number of computations.
+ optional int32 num_computations = 4;
+}
+
+// Result proto for HloExtraInfo.
+message HloExtraInfoResult {
+ // Category of the HLO op given by the compiler.
+ optional string category = 1;
+ // The long name of the HLO that includes the dimensions.
+ optional string long_name = 2;
+}
+
+// Result proto for HloExtraInfoMap.
+message HloExtraInfoMapResult {
+ // A map from HLO name to HloExtraInfo.
+ map<string, HloExtraInfoResult> hlo_extrainfo_map = 1;
+}
+
+// Result proto for TfStatsHelper.
+message TfOpStats {
+ // The result for the TF-metric database.
+ optional OpMetricsDbResult tf_metrics_db = 1;
+ // The result for the HLO-metric database.
+ optional OpMetricsDbResult hlo_metrics_db = 2;
+ // The result for the step database.
+ optional StepDatabaseResult step_db = 3;
+ // The result for the TPU dashboard.
+ optional DashboardResult dashboard = 4;
+ // The result for the HloExtraInfoMap.
+ optional HloExtraInfoMapResult hlo_extrainfo_map = 5;
+}
diff --git a/tensorflow/contrib/tpu/python/tpu/tpu_estimator.py b/tensorflow/contrib/tpu/python/tpu/tpu_estimator.py
index 5a3b831429..060b3f9129 100644
--- a/tensorflow/contrib/tpu/python/tpu/tpu_estimator.py
+++ b/tensorflow/contrib/tpu/python/tpu/tpu_estimator.py
@@ -66,7 +66,7 @@ _CROSS_REPLICA_SUM_OP = 'CrossReplicaSum'
_RESERVED_PARAMS_KEYS = [_BATCH_SIZE_KEY]
# TODO(b/65703635): Flip the value and remove all dead code.
-_WRAP_INPUT_FN_INTO_WHILE_LOOP = False
+_WRAP_INPUT_FN_INTO_WHILE_LOOP = True
def _create_global_step(graph):
diff --git a/tensorflow/core/BUILD b/tensorflow/core/BUILD
index 7b535da0b2..9530af637e 100644
--- a/tensorflow/core/BUILD
+++ b/tensorflow/core/BUILD
@@ -1414,16 +1414,19 @@ LIB_INTERNAL_PUBLIC_HEADERS = tf_additional_lib_hdrs() + [
"platform/tracing.h",
]
+# Replicated for lib_internal and lib_internal_impl.
+LIB_INTERNAL_DEFINES = (tf_additional_lib_defines() + [
+ "TF_USE_SNAPPY",
+ ] + tf_additional_verbs_lib_defines() +
+ tf_additional_mpi_lib_defines() +
+ tf_additional_gdr_lib_defines())
+
cc_library(
name = "lib_internal",
srcs = LIB_INTERNAL_PRIVATE_HEADERS,
hdrs = LIB_INTERNAL_PUBLIC_HEADERS,
copts = tf_copts(),
- defines = tf_additional_lib_defines() + [
- "TF_USE_SNAPPY",
- ] + tf_additional_verbs_lib_defines() +
- tf_additional_mpi_lib_defines() +
- tf_additional_gdr_lib_defines(),
+ defines = LIB_INTERNAL_DEFINES,
linkopts = select({
"//tensorflow:freebsd": [],
"//tensorflow:windows": [],
@@ -1477,6 +1480,7 @@ cc_library(
),
hdrs = LIB_INTERNAL_PUBLIC_HEADERS,
copts = tf_copts(),
+ defines = LIB_INTERNAL_DEFINES,
deps = tf_additional_lib_deps() + [
":lib_hash_crc32c_accelerate_internal",
":lib_proto_parsing",
diff --git a/tensorflow/core/api_def/api_test.cc b/tensorflow/core/api_def/api_test.cc
index ceeb172fa0..d95d958d5a 100644
--- a/tensorflow/core/api_def/api_test.cc
+++ b/tensorflow/core/api_def/api_test.cc
@@ -46,92 +46,218 @@ constexpr char kDefaultApiDefDir[] =
"tensorflow/core/api_def/base_api";
constexpr char kOverridesFilePath[] =
"tensorflow/cc/ops/op_gen_overrides.pbtxt";
-constexpr char kApiDefFileFormat[] = "api_def_%c.pbtxt";
-constexpr char kAlphabet[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+constexpr char kApiDefFileFormat[] = "api_def_%s.pbtxt";
+constexpr char kApiDefFilePattern[] = "api_def_*.pbtxt";
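+// For example, op "MatMul" maps to the file "api_def_MatMul.pbtxt".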
-// Get map from first character to ApiDefs for ops
-// that start with that character.
-std::unordered_map<char, ApiDefs> GenerateApiDef(
- const OpList& ops, const OpGenOverrides& overrides) {
+void FillBaseApiDef(ApiDef* api_def, const OpDef& op) {
+ api_def->set_graph_op_name(op.name());
+ // Add arg docs
+ for (auto& input_arg : op.input_arg()) {
+ if (!input_arg.description().empty()) {
+ auto* api_def_in_arg = api_def->add_in_arg();
+ api_def_in_arg->set_name(input_arg.name());
+ api_def_in_arg->set_description(input_arg.description());
+ }
+ }
+ for (auto& output_arg : op.output_arg()) {
+ if (!output_arg.description().empty()) {
+ auto* api_def_out_arg = api_def->add_out_arg();
+ api_def_out_arg->set_name(output_arg.name());
+ api_def_out_arg->set_description(output_arg.description());
+ }
+ }
+ // Add attr docs
+ for (auto& attr : op.attr()) {
+ if (!attr.description().empty()) {
+ auto* api_def_attr = api_def->add_attr();
+ api_def_attr->set_name(attr.name());
+ api_def_attr->set_description(attr.description());
+ }
+ }
+ // Add docs
+ api_def->set_summary(op.summary());
+ api_def->set_description(op.description());
+}
+
+// Checks if arg1 should be before arg2 according to ordering in args.
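+// For example, if args is ordered [x, y], CheckArgBefore(x, y, args) returns
+// true, while CheckArgBefore(y, x, args) returns false.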
+bool CheckArgBefore(const ApiDef::Arg* arg1, const ApiDef::Arg* arg2,
+ const protobuf::RepeatedPtrField<OpDef::ArgDef>& args) {
+ for (auto& arg : args) {
+ if (arg.name() == arg2->name()) {
+ return false;
+ } else if (arg.name() == arg1->name()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+// Checks if attr1 should be before attr2 according to ordering in op_def.
+bool CheckAttrBefore(const ApiDef::Attr* attr1, const ApiDef::Attr* attr2,
+ const OpDef& op_def) {
+ for (auto& attr : op_def.attr()) {
+ if (attr.name() == attr2->name()) {
+ return false;
+ } else if (attr.name() == attr1->name()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+// Applies renames to args.
+void ApplyArgOverrides(
+ protobuf::RepeatedPtrField<ApiDef::Arg>* args,
+ const protobuf::RepeatedPtrField<OpGenOverride::Rename>& renames,
+ const protobuf::RepeatedPtrField<OpDef::ArgDef>& op_args,
+ const string& op_name) {
+ for (auto& rename : renames) {
+ // First check if rename is valid.
+ bool valid = false;
+ for (const auto& op_arg : op_args) {
+ if (op_arg.name() == rename.from()) {
+ valid = true;
+ }
+ }
+ QCHECK(valid) << rename.from() << " is not a valid argument for "
+ << op_name;
+ bool found_arg = false;
+ // If Arg is already in ApiDef, just update it.
+ for (int i = 0; i < args->size(); ++i) {
+ auto* arg = args->Mutable(i);
+ if (arg->name() == rename.from()) {
+ arg->set_rename_to(rename.to());
+ found_arg = true;
+ break;
+ }
+ }
+ if (!found_arg) { // not in ApiDef, add a new arg.
+ auto* new_arg = args->Add();
+ new_arg->set_name(rename.from());
+ new_arg->set_rename_to(rename.to());
+ }
+ }
+ // We don't really need a specific order here right now.
+ // However, it is clearer if order follows OpDef.
+ std::sort(args->pointer_begin(), args->pointer_end(),
+ [&](ApiDef::Arg* arg1, ApiDef::Arg* arg2) {
+ return CheckArgBefore(arg1, arg2, op_args);
+ });
+}
+
+// Returns existing attribute with the given name if such
+// attribute exists. Otherwise, adds a new attribute and returns it.
+ApiDef::Attr* FindOrAddAttr(ApiDef* api_def, const string& attr_name) {
+ // If Attr is already in ApiDef, just update it.
+ for (int i = 0; i < api_def->attr_size(); ++i) {
+ auto* attr = api_def->mutable_attr(i);
+ if (attr->name() == attr_name) {
+ return attr;
+ }
+ }
+ // Add a new Attr.
+ auto* new_attr = api_def->add_attr();
+ new_attr->set_name(attr_name);
+ return new_attr;
+}
+
+// Applies renames and default values to attributes.
+void ApplyAttrOverrides(ApiDef* api_def, const OpGenOverride& op_override,
+ const OpDef& op_def) {
+ for (auto& attr_rename : op_override.attr_rename()) {
+ auto* attr = FindOrAddAttr(api_def, attr_rename.from());
+ attr->set_rename_to(attr_rename.to());
+ }
+
+ for (auto& attr_default : op_override.attr_default()) {
+ auto* attr = FindOrAddAttr(api_def, attr_default.name());
+ *(attr->mutable_default_value()) = attr_default.value();
+ }
+ // We don't really need a specific order here right now.
+ // However, it is clearer if order follows OpDef.
+ std::sort(api_def->mutable_attr()->pointer_begin(),
+ api_def->mutable_attr()->pointer_end(),
+ [&](ApiDef::Attr* attr1, ApiDef::Attr* attr2) {
+ return CheckAttrBefore(attr1, attr2, op_def);
+ });
+}
+
+void ApplyOverridesToApiDef(ApiDef* api_def, const OpDef& op,
+ const OpGenOverride& op_override) {
+ // Fill ApiDef with data based on op and op_override.
+ // Set visibility
+ if (op_override.skip()) {
+ api_def->set_visibility(ApiDef_Visibility_SKIP);
+ } else if (op_override.hide()) {
+ api_def->set_visibility(ApiDef_Visibility_HIDDEN);
+ }
+ // Add endpoints
+ if (!op_override.rename_to().empty()) {
+ api_def->add_endpoint()->set_name(op_override.rename_to());
+ } else if (!op_override.alias().empty()) {
+ api_def->add_endpoint()->set_name(op.name());
+ }
+
+ for (auto& alias : op_override.alias()) {
+ auto* endpoint = api_def->add_endpoint();
+ endpoint->set_name(alias);
+ }
+
+ ApplyArgOverrides(api_def->mutable_in_arg(), op_override.input_rename(),
+ op.input_arg(), api_def->graph_op_name());
+ ApplyArgOverrides(api_def->mutable_out_arg(), op_override.output_rename(),
+ op.output_arg(), api_def->graph_op_name());
+ ApplyAttrOverrides(api_def, op_override, op);
+}
+
+// Get map from ApiDef file path to corresponding ApiDefs proto.
+std::unordered_map<string, ApiDefs> GenerateApiDef(
+ const string& api_def_dir, const OpList& ops,
+ const OpGenOverrides& overrides) {
std::unordered_map<string, OpGenOverride> name_to_override;
for (const auto& op_override : overrides.op()) {
name_to_override[op_override.name()] = op_override;
}
- std::unordered_map<char, ApiDefs> api_defs_map;
+ std::unordered_map<string, ApiDefs> api_defs_map;
for (const auto& op : ops.op()) {
CHECK(!op.name().empty())
<< "Encountered empty op name: %s" << op.DebugString();
- const char file_id = toupper(op.name()[0]);
- CHECK(isalpha(file_id)) << "Unexpected op name: " << op.name();
- ApiDef* api_def = api_defs_map[file_id].add_op();
- api_def->set_graph_op_name(op.name());
+ string file_path = io::JoinPath(api_def_dir, kApiDefFileFormat);
+ file_path = strings::Printf(file_path.c_str(), op.name().c_str());
+ ApiDef* api_def = api_defs_map[file_path].add_op();
+ FillBaseApiDef(api_def, op);
if (name_to_override.find(op.name()) != name_to_override.end()) {
- const auto& op_override = name_to_override[op.name()];
- // Set visibility
- if (op_override.skip()) {
- api_def->set_visibility(ApiDef_Visibility_SKIP);
- } else if (op_override.hide()) {
- api_def->set_visibility(ApiDef_Visibility_HIDDEN);
- }
- // Add endpoints
- if (!op_override.rename_to().empty()) {
- auto* endpoint = api_def->add_endpoint();
- endpoint->set_name(op_override.rename_to());
- } else {
- auto* endpoint = api_def->add_endpoint();
- endpoint->set_name(op.name());
- }
- for (auto& alias : op_override.alias()) {
- auto* endpoint = api_def->add_endpoint();
- endpoint->set_name(alias);
- }
- // Add attributes
- for (auto& attr : op.attr()) {
- auto* api_def_attr = api_def->add_attr();
- api_def_attr->set_name(attr.name());
- for (auto& attr_override : op_override.attr_default()) {
- if (attr.name() == attr_override.name()) {
- *(api_def_attr->mutable_default_value()) = attr_override.value();
- }
- }
- for (auto& attr_rename : op_override.attr_rename()) {
- if (attr.name() == attr_rename.from()) {
- api_def_attr->set_rename_to(attr_rename.to());
- }
- }
- }
- } else {
- auto* endpoint = api_def->add_endpoint();
- endpoint->set_name(op.name());
+ ApplyOverridesToApiDef(api_def, op, name_to_override[op.name()]);
}
- // Add docs
- api_def->set_summary(op.summary());
- api_def->set_description(op.description());
}
return api_defs_map;
}
-// Reads golden api defs file with the given suffix.
-string GetGoldenApiDefsStr(Env* env, const string& api_files_dir, char suffix) {
- string file_path = strings::Printf(
- io::JoinPath(api_files_dir, kApiDefFileFormat).c_str(), suffix);
- if (env->FileExists(file_path).ok()) {
+// Reads golden ApiDef files and returns a map from file name to ApiDef file
+// contents.
+std::unordered_map<string, string> GetGoldenApiDefs(
+ Env* env, const string& api_files_dir) {
+ std::vector<string> matching_paths;
+ TF_CHECK_OK(env->GetMatchingPaths(
+ io::JoinPath(api_files_dir, kApiDefFilePattern), &matching_paths));
+
+ std::unordered_map<string, string> file_path_to_api_def;
+ for (auto& file_path : matching_paths) {
string file_contents;
- TF_EXPECT_OK(ReadFileToString(env, file_path, &file_contents));
- return file_contents;
+ TF_CHECK_OK(ReadFileToString(env, file_path, &file_contents));
+ file_path_to_api_def[file_path] = file_contents;
}
- return "";
+ return file_path_to_api_def;
}
void RunApiTest(bool update_api_def, const string& api_files_dir) {
// Read C++ overrides file
- string overrides_file_contents;
+ OpGenOverrides overrides;
Env* env = Env::Default();
- TF_EXPECT_OK(
- ReadFileToString(env, kOverridesFilePath, &overrides_file_contents));
+ TF_EXPECT_OK(ReadTextProto(env, kOverridesFilePath, &overrides));
// Read all ops
OpList ops;
@@ -139,29 +265,22 @@ void RunApiTest(bool update_api_def, const string& api_files_dir) {
const std::vector<string> multi_line_fields = {"description"};
// Get expected ApiDefs
- OpGenOverrides overrides;
- auto new_api_defs_map = GenerateApiDef(ops, overrides);
+ const auto new_api_defs_map = GenerateApiDef(api_files_dir, ops, overrides);
bool updated_at_least_one_file = false;
+ const auto golden_api_defs_map = GetGoldenApiDefs(env, api_files_dir);
- for (char c : kAlphabet) {
- string golden_api_defs_str = GetGoldenApiDefsStr(env, api_files_dir, c);
- string new_api_defs_str = new_api_defs_map[c].DebugString();
+  for (const auto& new_api_entry : new_api_defs_map) {
+ const auto& file_path = new_api_entry.first;
+ const auto& golden_api_defs_str = golden_api_defs_map.at(file_path);
+ string new_api_defs_str = new_api_entry.second.DebugString();
new_api_defs_str = PBTxtToMultiline(new_api_defs_str, multi_line_fields);
if (golden_api_defs_str == new_api_defs_str) {
continue;
}
if (update_api_def) {
- string output_file_path =
- io::JoinPath(api_files_dir, strings::Printf(kApiDefFileFormat, c));
- if (new_api_defs_str.empty()) {
- std::cout << "Deleting " << output_file_path << "..." << std::endl;
- TF_EXPECT_OK(env->DeleteFile(output_file_path));
- } else {
- std::cout << "Updating " << output_file_path << "..." << std::endl;
- TF_EXPECT_OK(
- WriteStringToFile(env, output_file_path, new_api_defs_str));
- }
+ std::cout << "Updating " << file_path << "..." << std::endl;
+ TF_EXPECT_OK(WriteStringToFile(env, file_path, new_api_defs_str));
updated_at_least_one_file = true;
} else {
EXPECT_EQ(golden_api_defs_str, new_api_defs_str)
@@ -170,6 +289,21 @@ void RunApiTest(bool update_api_def, const string& api_files_dir) {
}
}
+ for (const auto& golden_api_entry : golden_api_defs_map) {
+ const auto& file_path = golden_api_entry.first;
+ if (new_api_defs_map.find(file_path) == new_api_defs_map.end()) {
+ if (update_api_def) {
+ std::cout << "Deleting " << file_path << "..." << std::endl;
+ TF_EXPECT_OK(env->DeleteFile(file_path));
+ updated_at_least_one_file = true;
+ } else {
+ EXPECT_EQ("", golden_api_entry.second)
+ << "To update golden API files, run "
+ << "tensorflow/core/api_def/update_api_def.sh.";
+ }
+ }
+ }
+
if (update_api_def && !updated_at_least_one_file) {
std::cout << "Api def files are already up to date." << std::endl;
}
diff --git a/tensorflow/core/api_def/base_api/api_def_A.pbtxt b/tensorflow/core/api_def/base_api/api_def_A.pbtxt
deleted file mode 100644
index 8193d1bc62..0000000000
--- a/tensorflow/core/api_def/base_api/api_def_A.pbtxt
+++ /dev/null
@@ -1,670 +0,0 @@
-op {
- graph_op_name: "Abort"
- endpoint {
- name: "Abort"
- }
- summary: "Raise a exception to abort the process when called."
- description: <<END
-If exit_without_error is true, the process will exit normally,
-otherwise it will exit with a SIGABORT signal.
-
-Returns nothing but an exception.
-END
-}
-op {
- graph_op_name: "Abs"
- endpoint {
- name: "Abs"
- }
- summary: "Computes the absolute value of a tensor."
- description: <<END
-Given a tensor `x`, this operation returns a tensor containing the absolute
-value of each element in `x`. For example, if x is an input element and y is
-an output element, this operation computes \\(y = |x|\\).
-END
-}
-op {
- graph_op_name: "AccumulatorApplyGradient"
- endpoint {
- name: "AccumulatorApplyGradient"
- }
- summary: "Applies a gradient to a given accumulator."
- description: <<END
-Does not add if local_step is lesser than the accumulator's global_step.
-END
-}
-op {
- graph_op_name: "AccumulatorNumAccumulated"
- endpoint {
- name: "AccumulatorNumAccumulated"
- }
- summary: "Returns the number of gradients aggregated in the given accumulators."
-}
-op {
- graph_op_name: "AccumulatorSetGlobalStep"
- endpoint {
- name: "AccumulatorSetGlobalStep"
- }
- summary: "Updates the accumulator with a new value for global_step."
- description: <<END
-Logs warning if the accumulator's value is already higher than
-new_global_step.
-END
-}
-op {
- graph_op_name: "AccumulatorTakeGradient"
- endpoint {
- name: "AccumulatorTakeGradient"
- }
- summary: "Extracts the average gradient in the given ConditionalAccumulator."
- description: <<END
-The op blocks until sufficient (i.e., more than num_required)
-gradients have been accumulated. If the accumulator has already
-aggregated more than num_required gradients, it returns the average of
-the accumulated gradients. Also automatically increments the recorded
-global_step in the accumulator by 1, and resets the aggregate to 0.
-END
-}
-op {
- graph_op_name: "Acos"
- endpoint {
- name: "Acos"
- }
- summary: "Computes acos of x element-wise."
-}
-op {
- graph_op_name: "Acosh"
- endpoint {
- name: "Acosh"
- }
- summary: "Computes inverse hyperbolic cosine of x element-wise."
-}
-op {
- graph_op_name: "Add"
- endpoint {
- name: "Add"
- }
- summary: "Returns x + y element-wise."
- description: <<END
-*NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting
-[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-END
-}
-op {
- graph_op_name: "AddManySparseToTensorsMap"
- endpoint {
- name: "AddManySparseToTensorsMap"
- }
- summary: "Add an `N`-minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles."
- description: <<END
-A `SparseTensor` of rank `R` is represented by three tensors: `sparse_indices`,
-`sparse_values`, and `sparse_shape`, where
-
-```sparse_indices.shape[1] == sparse_shape.shape[0] == R```
-
-An `N`-minibatch of `SparseTensor` objects is represented as a `SparseTensor`
-having a first `sparse_indices` column taking values between `[0, N)`, where
-the minibatch size `N == sparse_shape[0]`.
-
-The input `SparseTensor` must have rank `R` greater than 1, and the first
-dimension is treated as the minibatch dimension. Elements of the `SparseTensor`
-must be sorted in increasing order of this first dimension. The stored
-`SparseTensor` objects pointed to by each row of the output `sparse_handles`
-will have rank `R-1`.
-
-The `SparseTensor` values can then be read out as part of a minibatch by passing
-the given keys as vector elements to `TakeManySparseFromTensorsMap`. To ensure
-the correct `SparseTensorsMap` is accessed, ensure that the same
-`container` and `shared_name` are passed to that Op. If no `shared_name`
-is provided here, instead use the *name* of the Operation created by calling
-`AddManySparseToTensorsMap` as the `shared_name` passed to
-`TakeManySparseFromTensorsMap`. Ensure the Operations are colocated.
-END
-}
-op {
- graph_op_name: "AddN"
- endpoint {
- name: "AddN"
- }
- summary: "Add all input tensors element wise."
-}
-op {
- graph_op_name: "AddSparseToTensorsMap"
- endpoint {
- name: "AddSparseToTensorsMap"
- }
- summary: "Add a `SparseTensor` to a `SparseTensorsMap` return its handle."
- description: <<END
-A `SparseTensor` is represented by three tensors: `sparse_indices`,
-`sparse_values`, and `sparse_shape`.
-
-This operator takes the given `SparseTensor` and adds it to a container
-object (a `SparseTensorsMap`). A unique key within this container is generated
-in the form of an `int64`, and this is the value that is returned.
-
-The `SparseTensor` can then be read out as part of a minibatch by passing
-the key as a vector element to `TakeManySparseFromTensorsMap`. To ensure
-the correct `SparseTensorsMap` is accessed, ensure that the same
-`container` and `shared_name` are passed to that Op. If no `shared_name`
-is provided here, instead use the *name* of the Operation created by calling
-`AddSparseToTensorsMap` as the `shared_name` passed to
-`TakeManySparseFromTensorsMap`. Ensure the Operations are colocated.
-END
-}
-op {
- graph_op_name: "AdjustContrast"
- endpoint {
- name: "AdjustContrast"
- }
- summary: "Deprecated. Disallowed in GraphDef version >= 2."
-}
-op {
- graph_op_name: "AdjustContrastv2"
- endpoint {
- name: "AdjustContrastv2"
- }
- summary: "Adjust the contrast of one or more images."
- description: <<END
-`images` is a tensor of at least 3 dimensions. The last 3 dimensions are
-interpreted as `[height, width, channels]`. The other dimensions only
-represent a collection of images, such as `[batch, height, width, channels].`
-
-Contrast is adjusted independently for each channel of each image.
-
-For each channel, the Op first computes the mean of the image pixels in the
-channel and then adjusts each component of each pixel to
-`(x - mean) * contrast_factor + mean`.
-END
-}
-op {
- graph_op_name: "AdjustHue"
- endpoint {
- name: "AdjustHue"
- }
- summary: "Adjust the hue of one or more images."
- description: <<END
-`images` is a tensor of at least 3 dimensions. The last dimension is
-interpretted as channels, and must be three.
-
-The input image is considered in the RGB colorspace. Conceptually, the RGB
-colors are first mapped into HSV. A delta is then applied all the hue values,
-and then remapped back to RGB colorspace.
-END
-}
-op {
- graph_op_name: "AdjustSaturation"
- endpoint {
- name: "AdjustSaturation"
- }
- summary: "Adjust the saturation of one or more images."
- description: <<END
-`images` is a tensor of at least 3 dimensions. The last dimension is
-interpretted as channels, and must be three.
-
-The input image is considered in the RGB colorspace. Conceptually, the RGB
-colors are first mapped into HSV. A scale is then applied all the saturation
-values, and then remapped back to RGB colorspace.
-END
-}
-op {
- graph_op_name: "All"
- endpoint {
- name: "All"
- }
- summary: "Computes the \"logical and\" of elements across dimensions of a tensor."
- description: <<END
-Reduces `input` along the dimensions given in `reduction_indices`. Unless
-`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
-`reduction_indices`. If `keep_dims` is true, the reduced dimensions are
-retained with length 1.
-END
-}
-op {
- graph_op_name: "AllCandidateSampler"
- endpoint {
- name: "AllCandidateSampler"
- }
- summary: "Generates labels for candidate sampling with a learned unigram distribution."
- description: <<END
-See explanations of candidate sampling and the data formats at
-go/candidate-sampling.
-
-For each batch, this op picks a single set of sampled candidate labels.
-
-The advantages of sampling candidates per-batch are simplicity and the
-possibility of efficient dense matrix multiplication. The disadvantage is that
-the sampled candidates must be chosen independently of the context and of the
-true labels.
-END
-}
-op {
- graph_op_name: "Angle"
- endpoint {
- name: "Angle"
- }
- summary: "Returns the argument of a complex number."
- description: <<END
-Given a tensor `input` of complex numbers, this operation returns a tensor of
-type `float` that is the argument of each element in `input`. All elements in
-`input` must be complex numbers of the form \\(a + bj\\), where *a*
-is the real part and *b* is the imaginary part.
-
-The argument returned by this operation is of the form \\(atan2(b, a)\\).
-
-For example:
-
-```
-# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
-tf.angle(input) ==> [2.0132, 1.056]
-```
-
-@compatibility(numpy)
-Equivalent to np.angle.
-@end_compatibility
-END
-}
-op {
- graph_op_name: "Any"
- endpoint {
- name: "Any"
- }
- summary: "Computes the \"logical or\" of elements across dimensions of a tensor."
- description: <<END
-Reduces `input` along the dimensions given in `reduction_indices`. Unless
-`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
-`reduction_indices`. If `keep_dims` is true, the reduced dimensions are
-retained with length 1.
-END
-}
-op {
- graph_op_name: "ApplyAdadelta"
- endpoint {
- name: "ApplyAdadelta"
- }
- summary: "Update \'*var\' according to the adadelta scheme."
- description: <<END
-accum = rho() * accum + (1 - rho()) * grad.square();
-update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad;
-update_accum = rho() * update_accum + (1 - rho()) * update.square();
-var -= update;
-END
-}
-op {
- graph_op_name: "ApplyAdagrad"
- endpoint {
- name: "ApplyAdagrad"
- }
- summary: "Update \'*var\' according to the adagrad scheme."
- description: <<END
-accum += grad * grad
-var -= lr * grad * (1 / sqrt(accum))
-END
-}
-op {
- graph_op_name: "ApplyAdagradDA"
- endpoint {
- name: "ApplyAdagradDA"
- }
- summary: "Update \'*var\' according to the proximal adagrad scheme."
-}
-op {
- graph_op_name: "ApplyAdam"
- endpoint {
- name: "ApplyAdam"
- }
- summary: "Update \'*var\' according to the Adam algorithm."
- description: <<END
-lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t)
-m_t <- beta1 * m_{t-1} + (1 - beta1) * g_t
-v_t <- beta2 * v_{t-1} + (1 - beta2) * g_t * g_t
-variable <- variable - lr_t * m_t / (sqrt(v_t) + epsilon)
-END
-}
-op {
- graph_op_name: "ApplyCenteredRMSProp"
- endpoint {
- name: "ApplyCenteredRMSProp"
- }
- summary: "Update \'*var\' according to the centered RMSProp algorithm."
- description: <<END
-The centered RMSProp algorithm uses an estimate of the centered second moment
-(i.e., the variance) for normalization, as opposed to regular RMSProp, which
-uses the (uncentered) second moment. This often helps with training, but is
-slightly more expensive in terms of computation and memory.
-
-Note that in dense implementation of this algorithm, mg, ms, and mom will
-update even if the grad is zero, but in this sparse implementation, mg, ms,
-and mom will not update in iterations during which the grad is zero.
-
-mean_square = decay * mean_square + (1-decay) * gradient ** 2
-mean_grad = decay * mean_grad + (1-decay) * gradient
-
-Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
-
-mg <- rho * mg_{t-1} + (1-rho) * grad
-ms <- rho * ms_{t-1} + (1-rho) * grad * grad
-mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon)
-var <- var - mom
-END
-}
-op {
- graph_op_name: "ApplyFtrl"
- endpoint {
- name: "ApplyFtrl"
- }
- summary: "Update \'*var\' according to the Ftrl-proximal scheme."
- description: <<END
-accum_new = accum + grad * grad
-linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
-quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
-var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
-accum = accum_new
-END
-}
-op {
- graph_op_name: "ApplyFtrlV2"
- endpoint {
- name: "ApplyFtrlV2"
- }
- summary: "Update \'*var\' according to the Ftrl-proximal scheme."
- description: <<END
-grad_with_shrinkage = grad + 2 * l2_shrinkage * var
-accum_new = accum + grad_with_shrinkage * grad_with_shrinkage
-linear += grad_with_shrinkage +
- (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
-quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
-var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
-accum = accum_new
-END
-}
-op {
- graph_op_name: "ApplyGradientDescent"
- endpoint {
- name: "ApplyGradientDescent"
- }
- summary: "Update \'*var\' by subtracting \'alpha\' * \'delta\' from it."
-}
-op {
- graph_op_name: "ApplyMomentum"
- endpoint {
- name: "ApplyMomentum"
- }
- summary: "Update \'*var\' according to the momentum scheme. Set use_nesterov = True if you"
- description: <<END
-want to use Nesterov momentum.
-
-accum = accum * momentum + grad
-var -= lr * accum
-END
-}
-op {
- graph_op_name: "ApplyProximalAdagrad"
- endpoint {
- name: "ApplyProximalAdagrad"
- }
- summary: "Update \'*var\' and \'*accum\' according to FOBOS with Adagrad learning rate."
- description: <<END
-accum += grad * grad
-prox_v = var - lr * grad * (1 / sqrt(accum))
-var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
-END
-}
-op {
- graph_op_name: "ApplyProximalGradientDescent"
- endpoint {
- name: "ApplyProximalGradientDescent"
- }
- summary: "Update \'*var\' as FOBOS algorithm with fixed learning rate."
- description: <<END
-prox_v = var - alpha * delta
-var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
-END
-}
-op {
- graph_op_name: "ApplyRMSProp"
- endpoint {
- name: "ApplyRMSProp"
- }
- summary: "Update \'*var\' according to the RMSProp algorithm."
- description: <<END
-Note that in dense implementation of this algorithm, ms and mom will
-update even if the grad is zero, but in this sparse implementation, ms
-and mom will not update in iterations during which the grad is zero.
-
-mean_square = decay * mean_square + (1-decay) * gradient ** 2
-Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
-
-ms <- rho * ms_{t-1} + (1-rho) * grad * grad
-mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
-var <- var - mom
-END
-}
-op {
- graph_op_name: "ApproximateEqual"
- endpoint {
- name: "ApproximateEqual"
- }
- summary: "Returns the truth value of abs(x-y) < tolerance element-wise."
-}
-op {
- graph_op_name: "ArgMax"
- endpoint {
- name: "ArgMax"
- }
- summary: "Returns the index with the largest value across dimensions of a tensor."
- description: <<END
-Note that in case of ties the identity of the return value is not guaranteed.
-END
-}
-op {
- graph_op_name: "ArgMin"
- endpoint {
- name: "ArgMin"
- }
- summary: "Returns the index with the smallest value across dimensions of a tensor."
- description: <<END
-Note that in case of ties the identity of the return value is not guaranteed.
-END
-}
-op {
- graph_op_name: "AsString"
- endpoint {
- name: "AsString"
- }
- summary: "Converts each entry in the given tensor to strings. Supports many numeric"
- description: <<END
-types and boolean.
-END
-}
-op {
- graph_op_name: "Asin"
- endpoint {
- name: "Asin"
- }
- summary: "Computes asin of x element-wise."
-}
-op {
- graph_op_name: "Asinh"
- endpoint {
- name: "Asinh"
- }
- summary: "Computes inverse hyperbolic sine of x element-wise."
-}
-op {
- graph_op_name: "Assert"
- endpoint {
- name: "Assert"
- }
- summary: "Asserts that the given condition is true."
- description: <<END
-If `condition` evaluates to false, print the list of tensors in `data`.
-`summarize` determines how many entries of the tensors to print.
-END
-}
-op {
- graph_op_name: "Assign"
- endpoint {
- name: "Assign"
- }
- summary: "Update \'ref\' by assigning \'value\' to it."
- description: <<END
-This operation outputs "ref" after the assignment is done.
-This makes it easier to chain operations that need to use the reset value.
-END
-}
-op {
- graph_op_name: "AssignAdd"
- endpoint {
- name: "AssignAdd"
- }
- summary: "Update \'ref\' by adding \'value\' to it."
- description: <<END
-This operation outputs "ref" after the update is done.
-This makes it easier to chain operations that need to use the reset value.
-END
-}
-op {
- graph_op_name: "AssignSub"
- endpoint {
- name: "AssignSub"
- }
- summary: "Update \'ref\' by subtracting \'value\' from it."
- description: <<END
-This operation outputs "ref" after the update is done.
-This makes it easier to chain operations that need to use the reset value.
-END
-}
-op {
- graph_op_name: "Atan"
- endpoint {
- name: "Atan"
- }
- summary: "Computes atan of x element-wise."
-}
-op {
- graph_op_name: "Atan2"
- endpoint {
- name: "Atan2"
- }
- summary: "Computes arctangent of `y/x` element-wise, respecting signs of the arguments."
- description: <<END
-This is the angle \( \theta \in [-\pi, \pi] \) such that
-\[ x = r \cos(\theta) \]
-and
-\[ y = r \sin(\theta) \]
-where \(r = \sqrt(x^2 + y^2) \).
-END
-}
-op {
- graph_op_name: "Atanh"
- endpoint {
- name: "Atanh"
- }
- summary: "Computes inverse hyperbolic tangent of x element-wise."
-}
-op {
- graph_op_name: "AudioSpectrogram"
- endpoint {
- name: "AudioSpectrogram"
- }
- summary: "Produces a visualization of audio data over time."
- description: <<END
-Spectrograms are a standard way of representing audio information as a series of
-slices of frequency information, one slice for each window of time. By joining
-these together into a sequence, they form a distinctive fingerprint of the sound
-over time.
-
-This op expects to receive audio data as an input, stored as floats in the range
--1 to 1, together with a window width in samples, and a stride specifying how
-far to move the window between slices. From this it generates a three
-dimensional output. The lowest dimension has an amplitude value for each
-frequency during that time slice. The next dimension is time, with successive
-frequency slices. The final dimension is for the channels in the input, so a
-stereo audio input would have two here for example.
-
-This means the layout when converted and saved as an image is rotated 90 degrees
-clockwise from a typical spectrogram. Time is descending down the Y axis, and
-the frequency decreases from left to right.
-
-Each value in the result represents the square root of the sum of the real and
-imaginary parts of an FFT on the current window of samples. In this way, the
-lowest dimension represents the power of each frequency in the current window,
-and adjacent windows are concatenated in the next dimension.
-
-To get a more intuitive and visual look at what this operation does, you can run
-tensorflow/examples/wav_to_spectrogram to read in an audio file and save out the
-resulting spectrogram as a PNG image.
-END
-}
-op {
- graph_op_name: "AudioSummary"
- endpoint {
- name: "AudioSummary"
- }
- summary: "Outputs a `Summary` protocol buffer with audio."
- description: <<END
-The summary has up to `max_outputs` summary values containing audio. The
-audio is built from `tensor` which must be 3-D with shape `[batch_size,
-frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
-assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.
-
-The `tag` argument is a scalar `Tensor` of type `string`. It is used to
-build the `tag` of the summary values:
-
-* If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
-* If `max_outputs` is greater than 1, the summary value tags are
- generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
-END
-}
-op {
- graph_op_name: "AudioSummaryV2"
- endpoint {
- name: "AudioSummaryV2"
- }
- summary: "Outputs a `Summary` protocol buffer with audio."
- description: <<END
-The summary has up to `max_outputs` summary values containing audio. The
-audio is built from `tensor` which must be 3-D with shape `[batch_size,
-frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
-assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.
-
-The `tag` argument is a scalar `Tensor` of type `string`. It is used to
-build the `tag` of the summary values:
-
-* If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
-* If `max_outputs` is greater than 1, the summary value tags are
- generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
-END
-}
-op {
- graph_op_name: "AvgPool"
- endpoint {
- name: "AvgPool"
- }
- summary: "Performs average pooling on the input."
- description: <<END
-Each entry in `output` is the mean of the corresponding size `ksize`
-window in `value`.
-END
-}
-op {
- graph_op_name: "AvgPool3D"
- endpoint {
- name: "AvgPool3D"
- }
- summary: "Performs 3D average pooling on the input."
-}
-op {
- graph_op_name: "AvgPool3DGrad"
- endpoint {
- name: "AvgPool3DGrad"
- }
- summary: "Computes gradients of average pooling function."
-}
-op {
- graph_op_name: "AvgPoolGrad"
- endpoint {
- name: "AvgPoolGrad"
- }
- summary: "Computes gradients of the average pooling function."
-}
diff --git a/tensorflow/core/api_def/base_api/api_def_Abort.pbtxt b/tensorflow/core/api_def/base_api/api_def_Abort.pbtxt
new file mode 100644
index 0000000000..6dd923c512
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Abort.pbtxt
@@ -0,0 +1,16 @@
+op {
+ graph_op_name: "Abort"
+ attr {
+ name: "error_msg"
+ description: <<END
+A string which is the message associated with the exception.
+END
+ }
+ summary: "Raise a exception to abort the process when called."
+ description: <<END
+If exit_without_error is true, the process will exit normally,
+otherwise it will exit with a SIGABORT signal.
+
+Returns nothing but an exception.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Abs.pbtxt b/tensorflow/core/api_def/base_api/api_def_Abs.pbtxt
new file mode 100644
index 0000000000..412891f4f4
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Abs.pbtxt
@@ -0,0 +1,9 @@
+op {
+ graph_op_name: "Abs"
+ summary: "Computes the absolute value of a tensor."
+ description: <<END
+Given a tensor `x`, this operation returns a tensor containing the absolute
+value of each element in `x`. For example, if x is an input element and y is
+an output element, this operation computes \\(y = |x|\\).
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_AccumulateNV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_AccumulateNV2.pbtxt
new file mode 100644
index 0000000000..2f20911d2d
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_AccumulateNV2.pbtxt
@@ -0,0 +1,26 @@
+op {
+ graph_op_name: "AccumulateNV2"
+ in_arg {
+ name: "inputs"
+ description: <<END
+A list of `Tensor` objects, each with the same shape and type.
+END
+ }
+ attr {
+ name: "shape"
+ description: <<END
+Shape of elements of `inputs`.
+END
+ }
+ summary: "Returns the element-wise sum of a list of tensors."
+ description: <<END
+`tf.accumulate_n_v2` performs the same operation as `tf.add_n`, but does not
+wait for all of its inputs to be ready before beginning to sum. This can
+save memory if inputs are ready at different times, since minimum temporary
+storage is proportional to the output size rather than the total input size.
+
+Unlike the original `accumulate_n`, `accumulate_n_v2` is differentiable.
+
+Returns a `Tensor` of the same shape and type as the elements of `inputs`.
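+
+For example (an illustrative sketch, assuming two same-shape tensors `a`
+and `b`):
+
+```
+# tensor 'a' is [1, 2]; tensor 'b' is [3, 4]
+tf.accumulate_n_v2([a, b]) ==> [4, 6]
+```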
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_AccumulatorApplyGradient.pbtxt b/tensorflow/core/api_def/base_api/api_def_AccumulatorApplyGradient.pbtxt
new file mode 100644
index 0000000000..25928a32ca
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_AccumulatorApplyGradient.pbtxt
@@ -0,0 +1,32 @@
+op {
+ graph_op_name: "AccumulatorApplyGradient"
+ in_arg {
+ name: "handle"
+ description: <<END
+The handle to an accumulator.
+END
+ }
+ in_arg {
+ name: "local_step"
+ description: <<END
+The local_step value at which the gradient was computed.
+END
+ }
+ in_arg {
+ name: "gradient"
+ description: <<END
+A tensor of the gradient to be accumulated.
+END
+ }
+ attr {
+ name: "dtype"
+ description: <<END
+The data type of accumulated gradients. Needs to correspond to the type
+of the accumulator.
+END
+ }
+ summary: "Applies a gradient to a given accumulator."
+ description: <<END
+Does not add if local_step is lesser than the accumulator's global_step.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_AccumulatorNumAccumulated.pbtxt b/tensorflow/core/api_def/base_api/api_def_AccumulatorNumAccumulated.pbtxt
new file mode 100644
index 0000000000..270265a804
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_AccumulatorNumAccumulated.pbtxt
@@ -0,0 +1,16 @@
+op {
+ graph_op_name: "AccumulatorNumAccumulated"
+ in_arg {
+ name: "handle"
+ description: <<END
+The handle to an accumulator.
+END
+ }
+ out_arg {
+ name: "num_accumulated"
+ description: <<END
+The number of gradients aggregated in the given accumulator.
+END
+ }
+ summary: "Returns the number of gradients aggregated in the given accumulators."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_AccumulatorSetGlobalStep.pbtxt b/tensorflow/core/api_def/base_api/api_def_AccumulatorSetGlobalStep.pbtxt
new file mode 100644
index 0000000000..b08a0afbc2
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_AccumulatorSetGlobalStep.pbtxt
@@ -0,0 +1,20 @@
+op {
+ graph_op_name: "AccumulatorSetGlobalStep"
+ in_arg {
+ name: "handle"
+ description: <<END
+The handle to an accumulator.
+END
+ }
+ in_arg {
+ name: "new_global_step"
+ description: <<END
+The new global_step value to set.
+END
+ }
+ summary: "Updates the accumulator with a new value for global_step."
+ description: <<END
+Logs warning if the accumulator's value is already higher than
+new_global_step.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_AccumulatorTakeGradient.pbtxt b/tensorflow/core/api_def/base_api/api_def_AccumulatorTakeGradient.pbtxt
new file mode 100644
index 0000000000..1e53de7c6f
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_AccumulatorTakeGradient.pbtxt
@@ -0,0 +1,36 @@
+op {
+ graph_op_name: "AccumulatorTakeGradient"
+ in_arg {
+ name: "handle"
+ description: <<END
+The handle to an accumulator.
+END
+ }
+ in_arg {
+ name: "num_required"
+ description: <<END
+Number of gradients required before we return an aggregate.
+END
+ }
+ out_arg {
+ name: "average"
+ description: <<END
+The average of the accumulated gradients.
+END
+ }
+ attr {
+ name: "dtype"
+ description: <<END
+The data type of accumulated gradients. Needs to correspond to the type
+of the accumulator.
+END
+ }
+ summary: "Extracts the average gradient in the given ConditionalAccumulator."
+ description: <<END
+The op blocks until sufficient (i.e., more than num_required)
+gradients have been accumulated. If the accumulator has already
+aggregated more than num_required gradients, it returns the average of
+the accumulated gradients. Also automatically increments the recorded
+global_step in the accumulator by 1, and resets the aggregate to 0.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Acos.pbtxt b/tensorflow/core/api_def/base_api/api_def_Acos.pbtxt
new file mode 100644
index 0000000000..2184b644b2
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Acos.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Acos"
+ summary: "Computes acos of x element-wise."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Acosh.pbtxt b/tensorflow/core/api_def/base_api/api_def_Acosh.pbtxt
new file mode 100644
index 0000000000..da77e81498
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Acosh.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Acosh"
+ summary: "Computes inverse hyperbolic cosine of x element-wise."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Add.pbtxt b/tensorflow/core/api_def/base_api/api_def_Add.pbtxt
new file mode 100644
index 0000000000..7a408af380
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Add.pbtxt
@@ -0,0 +1,8 @@
+op {
+ graph_op_name: "Add"
+ summary: "Returns x + y element-wise."
+ description: <<END
+*NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_AddManySparseToTensorsMap.pbtxt b/tensorflow/core/api_def/base_api/api_def_AddManySparseToTensorsMap.pbtxt
new file mode 100644
index 0000000000..9e5726a2d3
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_AddManySparseToTensorsMap.pbtxt
@@ -0,0 +1,68 @@
+op {
+ graph_op_name: "AddManySparseToTensorsMap"
+ in_arg {
+ name: "sparse_indices"
+ description: <<END
+2-D. The `indices` of the minibatch `SparseTensor`.
+`sparse_indices[:, 0]` must be ordered values in `[0, N)`.
+END
+ }
+ in_arg {
+ name: "sparse_values"
+ description: <<END
+1-D. The `values` of the minibatch `SparseTensor`.
+END
+ }
+ in_arg {
+ name: "sparse_shape"
+ description: <<END
+1-D. The `shape` of the minibatch `SparseTensor`.
+The minibatch size `N == sparse_shape[0]`.
+END
+ }
+ out_arg {
+ name: "sparse_handles"
+ description: <<END
+1-D. The handles of the `SparseTensor` now stored in the
+`SparseTensorsMap`. Shape: `[N]`.
+END
+ }
+ attr {
+ name: "container"
+ description: <<END
+The container name for the `SparseTensorsMap` created by this op.
+END
+ }
+ attr {
+ name: "shared_name"
+ description: <<END
+The shared name for the `SparseTensorsMap` created by this op.
+If blank, the new Operation's unique name is used.
+END
+ }
+ summary: "Add an `N`-minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles."
+ description: <<END
+A `SparseTensor` of rank `R` is represented by three tensors: `sparse_indices`,
+`sparse_values`, and `sparse_shape`, where
+
+```sparse_indices.shape[1] == sparse_shape.shape[0] == R```
+
+An `N`-minibatch of `SparseTensor` objects is represented as a `SparseTensor`
+having a first `sparse_indices` column taking values between `[0, N)`, where
+the minibatch size `N == sparse_shape[0]`.
+
+The input `SparseTensor` must have rank `R` greater than 1, and the first
+dimension is treated as the minibatch dimension. Elements of the `SparseTensor`
+must be sorted in increasing order of this first dimension. The stored
+`SparseTensor` objects pointed to by each row of the output `sparse_handles`
+will have rank `R-1`.
+
+The `SparseTensor` values can then be read out as part of a minibatch by passing
+the given keys as vector elements to `TakeManySparseFromTensorsMap`. To ensure
+the correct `SparseTensorsMap` is accessed, ensure that the same
+`container` and `shared_name` are passed to that Op. If no `shared_name`
+is provided here, instead use the *name* of the Operation created by calling
+`AddManySparseToTensorsMap` as the `shared_name` passed to
+`TakeManySparseFromTensorsMap`. Ensure the Operations are colocated.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_AddN.pbtxt b/tensorflow/core/api_def/base_api/api_def_AddN.pbtxt
new file mode 100644
index 0000000000..64677763a4
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_AddN.pbtxt
@@ -0,0 +1,10 @@
+op {
+ graph_op_name: "AddN"
+ in_arg {
+ name: "inputs"
+ description: <<END
+Must all be the same size and shape.
+END
+ }
+ summary: "Add all input tensors element wise."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_AddSparseToTensorsMap.pbtxt b/tensorflow/core/api_def/base_api/api_def_AddSparseToTensorsMap.pbtxt
new file mode 100644
index 0000000000..0438eac654
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_AddSparseToTensorsMap.pbtxt
@@ -0,0 +1,58 @@
+op {
+ graph_op_name: "AddSparseToTensorsMap"
+ in_arg {
+ name: "sparse_indices"
+ description: <<END
+2-D. The `indices` of the `SparseTensor`.
+END
+ }
+ in_arg {
+ name: "sparse_values"
+ description: <<END
+1-D. The `values` of the `SparseTensor`.
+END
+ }
+ in_arg {
+ name: "sparse_shape"
+ description: <<END
+1-D. The `shape` of the `SparseTensor`.
+END
+ }
+ out_arg {
+ name: "sparse_handle"
+ description: <<END
+0-D. The handle of the `SparseTensor` now stored in the
+`SparseTensorsMap`.
+END
+ }
+ attr {
+ name: "container"
+ description: <<END
+The container name for the `SparseTensorsMap` created by this op.
+END
+ }
+ attr {
+ name: "shared_name"
+ description: <<END
+The shared name for the `SparseTensorsMap` created by this op.
+If blank, the new Operation's unique name is used.
+END
+ }
+ summary: "Add a `SparseTensor` to a `SparseTensorsMap` return its handle."
+ description: <<END
+A `SparseTensor` is represented by three tensors: `sparse_indices`,
+`sparse_values`, and `sparse_shape`.
+
+This operator takes the given `SparseTensor` and adds it to a container
+object (a `SparseTensorsMap`). A unique key within this container is generated
+in the form of an `int64`, and this is the value that is returned.
+
+The `SparseTensor` can then be read out as part of a minibatch by passing
+the key as a vector element to `TakeManySparseFromTensorsMap`. To ensure
+the correct `SparseTensorsMap` is accessed, ensure that the same
+`container` and `shared_name` are passed to that Op. If no `shared_name`
+is provided here, instead use the *name* of the Operation created by calling
+`AddSparseToTensorsMap` as the `shared_name` passed to
+`TakeManySparseFromTensorsMap`. Ensure the Operations are colocated.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_AddV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_AddV2.pbtxt
new file mode 100644
index 0000000000..1e4db21151
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_AddV2.pbtxt
@@ -0,0 +1,8 @@
+op {
+ graph_op_name: "AddV2"
+ summary: "Returns x + y element-wise."
+ description: <<END
+*NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_AdjustContrast.pbtxt b/tensorflow/core/api_def/base_api/api_def_AdjustContrast.pbtxt
new file mode 100644
index 0000000000..45988d7e36
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_AdjustContrast.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "AdjustContrast"
+ summary: "Deprecated. Disallowed in GraphDef version >= 2."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_AdjustContrastv2.pbtxt b/tensorflow/core/api_def/base_api/api_def_AdjustContrastv2.pbtxt
new file mode 100644
index 0000000000..429a5e4434
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_AdjustContrastv2.pbtxt
@@ -0,0 +1,36 @@
+op {
+ graph_op_name: "AdjustContrastv2"
+ endpoint {
+ name: "AdjustContrast"
+ }
+ in_arg {
+ name: "images"
+ description: <<END
+Images to adjust. At least 3-D.
+END
+ }
+ in_arg {
+ name: "contrast_factor"
+ description: <<END
+A float multiplier for adjusting contrast.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+The contrast-adjusted image or images.
+END
+ }
+ summary: "Adjust the contrast of one or more images."
+ description: <<END
+`images` is a tensor of at least 3 dimensions. The last 3 dimensions are
+interpreted as `[height, width, channels]`. The other dimensions only
+represent a collection of images, such as `[batch, height, width, channels]`.
+
+Contrast is adjusted independently for each channel of each image.
+
+For each channel, the Op first computes the mean of the image pixels in the
+channel and then adjusts each component of each pixel to
+`(x - mean) * contrast_factor + mean`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_AdjustHue.pbtxt b/tensorflow/core/api_def/base_api/api_def_AdjustHue.pbtxt
new file mode 100644
index 0000000000..bfaf676860
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_AdjustHue.pbtxt
@@ -0,0 +1,30 @@
+op {
+ graph_op_name: "AdjustHue"
+ in_arg {
+ name: "images"
+ description: <<END
+Images to adjust. At least 3-D.
+END
+ }
+ in_arg {
+ name: "delta"
+ description: <<END
+A float delta to add to the hue.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+The hue-adjusted image or images.
+END
+ }
+ summary: "Adjust the hue of one or more images."
+ description: <<END
+`images` is a tensor of at least 3 dimensions. The last dimension is
+interpreted as channels, and must be three.
+
+The input image is considered in the RGB colorspace. Conceptually, the RGB
+colors are first mapped into HSV. A delta is then applied to all the hue
+values, and the result is remapped back to the RGB colorspace.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_AdjustSaturation.pbtxt b/tensorflow/core/api_def/base_api/api_def_AdjustSaturation.pbtxt
new file mode 100644
index 0000000000..97be0fda11
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_AdjustSaturation.pbtxt
@@ -0,0 +1,30 @@
+op {
+ graph_op_name: "AdjustSaturation"
+ in_arg {
+ name: "images"
+ description: <<END
+Images to adjust. At least 3-D.
+END
+ }
+ in_arg {
+ name: "scale"
+ description: <<END
+A float factor by which to scale the saturation.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+The saturation-adjusted image or images.
+END
+ }
+ summary: "Adjust the saturation of one or more images."
+ description: <<END
+`images` is a tensor of at least 3 dimensions. The last dimension is
+interpreted as channels, and must be three.
+
+The input image is considered in the RGB colorspace. Conceptually, the RGB
+colors are first mapped into HSV. A scale is then applied to all the
+saturation values, and the result is remapped back to the RGB colorspace.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_All.pbtxt b/tensorflow/core/api_def/base_api/api_def_All.pbtxt
new file mode 100644
index 0000000000..623389988a
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_All.pbtxt
@@ -0,0 +1,42 @@
+op {
+ graph_op_name: "All"
+ endpoint {
+ name: "All"
+ }
+ endpoint {
+ name: "ReduceAll"
+ }
+ in_arg {
+ name: "input"
+ description: <<END
+The tensor to reduce.
+END
+ }
+ in_arg {
+ name: "reduction_indices"
+ rename_to: "axis"
+ description: <<END
+The dimensions to reduce. Must be in the range
+`[-rank(input), rank(input))`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+The reduced tensor.
+END
+ }
+ attr {
+ name: "keep_dims"
+ description: <<END
+If true, retain reduced dimensions with length 1.
+END
+ }
+ summary: "Computes the \"logical and\" of elements across dimensions of a tensor."
+ description: <<END
+Reduces `input` along the dimensions given in `reduction_indices`. Unless
+`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
+`reduction_indices`. If `keep_dims` is true, the reduced dimensions are
+retained with length 1.
+END
+}
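The reduction semantics (shared with `Any`/`ReduceAny` below) in one quick NumPy example:

```python
import numpy as np

x = np.array([[True, True],
              [False, True]])

# Reducing along axis 1 drops that dimension from the result...
print(np.all(x, axis=1))                  # [ True False]
# ...unless keep_dims retains it with length 1.
print(np.all(x, axis=1, keepdims=True))   # [[ True] [False]]
```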
diff --git a/tensorflow/core/api_def/base_api/api_def_AllCandidateSampler.pbtxt b/tensorflow/core/api_def/base_api/api_def_AllCandidateSampler.pbtxt
new file mode 100644
index 0000000000..38b8e2bfba
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_AllCandidateSampler.pbtxt
@@ -0,0 +1,80 @@
+op {
+ graph_op_name: "AllCandidateSampler"
+ in_arg {
+ name: "true_classes"
+ description: <<END
+A batch_size * num_true matrix, in which each row contains the
+IDs of the num_true target_classes in the corresponding original label.
+END
+ }
+ out_arg {
+ name: "sampled_candidates"
+ description: <<END
+A vector of length num_sampled, in which each element is
+the ID of a sampled candidate.
+END
+ }
+ out_arg {
+ name: "true_expected_count"
+ description: <<END
+A batch_size * num_true matrix, representing
+the number of times each candidate is expected to occur in a batch
+of sampled candidates. If unique=true, then this is a probability.
+END
+ }
+ out_arg {
+ name: "sampled_expected_count"
+ description: <<END
+A vector of length num_sampled, for each sampled
+candidate representing the number of times the candidate is expected
+to occur in a batch of sampled candidates. If unique=true, then this is a
+probability.
+END
+ }
+ attr {
+ name: "num_true"
+ description: <<END
+Number of true labels per context.
+END
+ }
+ attr {
+ name: "num_sampled"
+ description: <<END
+Number of candidates to produce.
+END
+ }
+ attr {
+ name: "unique"
+ description: <<END
+If unique is true, we sample with rejection, so that all sampled
+candidates in a batch are unique. This requires some approximation to
+estimate the post-rejection sampling probabilities.
+END
+ }
+ attr {
+ name: "seed"
+ description: <<END
+If either seed or seed2 are set to be non-zero, the random number
+generator is seeded by the given seed. Otherwise, it is seeded by a
+random seed.
+END
+ }
+ attr {
+ name: "seed2"
+ description: <<END
+A second seed to avoid seed collision.
+END
+ }
+ summary: "Generates labels for candidate sampling with a learned unigram distribution."
+ description: <<END
+See explanations of candidate sampling and the data formats at
+go/candidate-sampling.
+
+For each batch, this op picks a single set of sampled candidate labels.
+
+The advantages of sampling candidates per-batch are simplicity and the
+possibility of efficient dense matrix multiplication. The disadvantage is that
+the sampled candidates must be chosen independently of the context and of the
+true labels.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Angle.pbtxt b/tensorflow/core/api_def/base_api/api_def_Angle.pbtxt
new file mode 100644
index 0000000000..a26e5e2447
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Angle.pbtxt
@@ -0,0 +1,23 @@
+op {
+ graph_op_name: "Angle"
+ summary: "Returns the argument of a complex number."
+ description: <<END
+Given a tensor `input` of complex numbers, this operation returns a tensor of
+type `float` that is the argument of each element in `input`. All elements in
+`input` must be complex numbers of the form \\(a + bj\\), where *a*
+is the real part and *b* is the imaginary part.
+
+The argument returned by this operation is of the form \\(atan2(b, a)\\).
+
+For example:
+
+```
+# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
+tf.angle(input) ==> [2.0132, 1.056]
+```
+
+@compatibility(numpy)
+Equivalent to np.angle.
+@end_compatibility
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Any.pbtxt b/tensorflow/core/api_def/base_api/api_def_Any.pbtxt
new file mode 100644
index 0000000000..09fd4e0b60
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Any.pbtxt
@@ -0,0 +1,42 @@
+op {
+ graph_op_name: "Any"
+ endpoint {
+ name: "Any"
+ }
+ endpoint {
+ name: "ReduceAny"
+ }
+ in_arg {
+ name: "input"
+ description: <<END
+The tensor to reduce.
+END
+ }
+ in_arg {
+ name: "reduction_indices"
+ rename_to: "axis"
+ description: <<END
+The dimensions to reduce. Must be in the range
+`[-rank(input), rank(input))`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+The reduced tensor.
+END
+ }
+ attr {
+ name: "keep_dims"
+ description: <<END
+If true, retain reduced dimensions with length 1.
+END
+ }
+ summary: "Computes the \"logical or\" of elements across dimensions of a tensor."
+ description: <<END
+Reduces `input` along the dimensions given in `reduction_indices`. Unless
+`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
+`reduction_indices`. If `keep_dims` is true, the reduced dimensions are
+retained with length 1.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ApplyAdadelta.pbtxt b/tensorflow/core/api_def/base_api/api_def_ApplyAdadelta.pbtxt
new file mode 100644
index 0000000000..d3aa32ba9f
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ApplyAdadelta.pbtxt
@@ -0,0 +1,65 @@
+op {
+ graph_op_name: "ApplyAdadelta"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "accum"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "accum_update"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "lr"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "rho"
+ description: <<END
+Decay factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "epsilon"
+ description: <<END
+Constant factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+The gradient.
+END
+ }
+ out_arg {
+ name: "out"
+ description: <<END
+Same as "var".
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If True, updating of the var, accum and update_accum tensors will be protected by
+a lock; otherwise the behavior is undefined, but may exhibit less contention.
+END
+ }
+ summary: "Update \'*var\' according to the adadelta scheme."
+ description: <<END
+accum = rho * accum + (1 - rho) * grad^2
+update = sqrt(update_accum + epsilon) / sqrt(accum + epsilon) * grad
+update_accum = rho * update_accum + (1 - rho) * update^2
+var -= update
+END
+}
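Transcribed into runnable NumPy (a sketch assuming the `lr` input simply scales the computed update; names are illustrative):

```python
import numpy as np

def apply_adadelta(var, accum, accum_update, lr, rho, epsilon, grad):
    accum[:] = rho * accum + (1 - rho) * grad ** 2
    update = np.sqrt(accum_update + epsilon) / np.sqrt(accum + epsilon) * grad
    accum_update[:] = rho * accum_update + (1 - rho) * update ** 2
    var[:] -= lr * update  # assumes lr, the "Scaling factor" input, scales the step
    return var
```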
diff --git a/tensorflow/core/api_def/base_api/api_def_ApplyAdagrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_ApplyAdagrad.pbtxt
new file mode 100644
index 0000000000..057786b6aa
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ApplyAdagrad.pbtxt
@@ -0,0 +1,46 @@
+op {
+ graph_op_name: "ApplyAdagrad"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "accum"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "lr"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+The gradient.
+END
+ }
+ out_arg {
+ name: "out"
+ description: <<END
+Same as "var".
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If `True`, updating of the var and accum tensors will be protected
+by a lock; otherwise the behavior is undefined, but may exhibit less
+contention.
+END
+ }
+ summary: "Update \'*var\' according to the adagrad scheme."
+ description: <<END
+accum += grad * grad
+var -= lr * grad * (1 / sqrt(accum))
+END
+}
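The same two-line update as a NumPy sketch, showing how the accumulated squared gradients shrink the effective step over repeated calls:

```python
import numpy as np

def apply_adagrad(var, accum, lr, grad):
    accum[:] += grad ** 2
    var[:] -= lr * grad / np.sqrt(accum)
    return var

var, accum = np.array([1.0]), np.array([0.1])
for _ in range(3):
    apply_adagrad(var, accum, lr=0.1, grad=np.array([0.5]))
print(var, accum)  # each step is smaller than the last
```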
diff --git a/tensorflow/core/api_def/base_api/api_def_ApplyAdagradDA.pbtxt b/tensorflow/core/api_def/base_api/api_def_ApplyAdagradDA.pbtxt
new file mode 100644
index 0000000000..1453bb558d
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ApplyAdagradDA.pbtxt
@@ -0,0 +1,65 @@
+op {
+ graph_op_name: "ApplyAdagradDA"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "gradient_accumulator"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "gradient_squared_accumulator"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+The gradient.
+END
+ }
+ in_arg {
+ name: "lr"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "l1"
+ description: <<END
+L1 regularization. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "l2"
+ description: <<END
+L2 regularization. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "global_step"
+ description: <<END
+Training step number. Must be a scalar.
+END
+ }
+ out_arg {
+ name: "out"
+ description: <<END
+Same as "var".
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If True, updating of the var and accum tensors will be protected by
+a lock; otherwise the behavior is undefined, but may exhibit less contention.
+END
+ }
+ summary: "Update \'*var\' according to the proximal adagrad scheme."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ApplyAdam.pbtxt b/tensorflow/core/api_def/base_api/api_def_ApplyAdam.pbtxt
new file mode 100644
index 0000000000..c2858a1bfb
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ApplyAdam.pbtxt
@@ -0,0 +1,90 @@
+op {
+ graph_op_name: "ApplyAdam"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "m"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "v"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "beta1_power"
+ description: <<END
+Must be a scalar.
+END
+ }
+ in_arg {
+ name: "beta2_power"
+ description: <<END
+Must be a scalar.
+END
+ }
+ in_arg {
+ name: "lr"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "beta1"
+ description: <<END
+Momentum factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "beta2"
+ description: <<END
+Momentum factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "epsilon"
+ description: <<END
+Ridge term. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+The gradient.
+END
+ }
+ out_arg {
+ name: "out"
+ description: <<END
+Same as "var".
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If `True`, updating of the var, m, and v tensors will be protected
+by a lock; otherwise the behavior is undefined, but may exhibit less
+contention.
+END
+ }
+ attr {
+ name: "use_nesterov"
+ description: <<END
+If `True`, uses the nesterov update.
+END
+ }
+ summary: "Update \'*var\' according to the Adam algorithm."
+ description: <<END
+lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t)
+m_t <- beta1 * m_{t-1} + (1 - beta1) * g_t
+v_t <- beta2 * v_{t-1} + (1 - beta2) * g_t * g_t
+variable <- variable - lr_t * m_t / (sqrt(v_t) + epsilon)
+END
+}
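A NumPy transcription of the update above (here `t` stands in for the step count, so beta1_power = beta1 ** t and beta2_power = beta2 ** t):

```python
import numpy as np

def apply_adam(var, m, v, t, lr, beta1, beta2, epsilon, grad):
    lr_t = lr * np.sqrt(1 - beta2 ** t) / (1 - beta1 ** t)
    m[:] = beta1 * m + (1 - beta1) * grad            # first-moment estimate
    v[:] = beta2 * v + (1 - beta2) * grad ** 2       # second-moment estimate
    var[:] -= lr_t * m / (np.sqrt(v) + epsilon)
    return var
```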
diff --git a/tensorflow/core/api_def/base_api/api_def_ApplyCenteredRMSProp.pbtxt b/tensorflow/core/api_def/base_api/api_def_ApplyCenteredRMSProp.pbtxt
new file mode 100644
index 0000000000..c88d18d3b2
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ApplyCenteredRMSProp.pbtxt
@@ -0,0 +1,86 @@
+op {
+ graph_op_name: "ApplyCenteredRMSProp"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "mg"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "ms"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "mom"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "lr"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "rho"
+ description: <<END
+Decay rate. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "epsilon"
+ description: <<END
+Ridge term. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+The gradient.
+END
+ }
+ out_arg {
+ name: "out"
+ description: <<END
+Same as "var".
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If `True`, updating of the var, mg, ms, and mom tensors is
+protected by a lock; otherwise the behavior is undefined, but may exhibit less
+contention.
+END
+ }
+ summary: "Update \'*var\' according to the centered RMSProp algorithm."
+ description: <<END
+The centered RMSProp algorithm uses an estimate of the centered second moment
+(i.e., the variance) for normalization, as opposed to regular RMSProp, which
+uses the (uncentered) second moment. This often helps with training, but is
+slightly more expensive in terms of computation and memory.
+
+Note that in the dense implementation of this algorithm, mg, ms, and mom will
+update even if the grad is zero, but in the sparse implementation, mg, ms,
+and mom will not update in iterations during which the grad is zero.
+
+mean_square = decay * mean_square + (1-decay) * gradient ** 2
+mean_grad = decay * mean_grad + (1-decay) * gradient
+
+Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
+
+mg <- rho * mg_{t-1} + (1-rho) * grad
+ms <- rho * ms_{t-1} + (1-rho) * grad * grad
+mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon)
+var <- var - mom
+END
+}
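The mg/ms/mom recurrences above in NumPy form (`momentum` here corresponds to the momentum symbol in the equations; dropping the mg terms yields the plain ApplyRMSProp op further below):

```python
import numpy as np

def apply_centered_rms_prop(var, mg, ms, mom, lr, rho, momentum, epsilon, grad):
    mg[:] = rho * mg + (1 - rho) * grad           # running first moment
    ms[:] = rho * ms + (1 - rho) * grad ** 2      # running second moment
    # Normalize by the centered second moment, i.e. the variance estimate.
    mom[:] = momentum * mom + lr * grad / np.sqrt(ms - mg ** 2 + epsilon)
    var[:] -= mom
    return var
```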
diff --git a/tensorflow/core/api_def/base_api/api_def_ApplyFtrl.pbtxt b/tensorflow/core/api_def/base_api/api_def_ApplyFtrl.pbtxt
new file mode 100644
index 0000000000..77da9e4d51
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ApplyFtrl.pbtxt
@@ -0,0 +1,73 @@
+op {
+ graph_op_name: "ApplyFtrl"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "accum"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "linear"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+The gradient.
+END
+ }
+ in_arg {
+ name: "lr"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "l1"
+ description: <<END
+L1 regularization. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "l2"
+ description: <<END
+L2 regularization. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "lr_power"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ out_arg {
+ name: "out"
+ description: <<END
+Same as "var".
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If `True`, updating of the var and accum tensors will be protected
+by a lock; otherwise the behavior is undefined, but may exhibit less
+contention.
+END
+ }
+ summary: "Update \'*var\' according to the Ftrl-proximal scheme."
+ description: <<END
+accum_new = accum + grad * grad
+linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
+quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
+var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
+accum = accum_new
+END
+}
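A literal NumPy transcription of the update (note accum is normally initialized to a small positive value so the accum^(-lr_power) terms stay finite; ApplyFtrlV2 below is the same scheme with grad first replaced by grad + 2 * l2_shrinkage * var):

```python
import numpy as np

def apply_ftrl(var, accum, linear, grad, lr, l1, l2, lr_power):
    accum_new = accum + grad ** 2
    linear[:] += grad + (accum_new ** -lr_power - accum ** -lr_power) / lr * var
    quadratic = 1.0 / (accum_new ** lr_power * lr) + 2 * l2
    # Coordinates with |linear| <= l1 are clamped to zero (the L1 prox).
    var[:] = np.where(np.abs(linear) > l1,
                      (np.sign(linear) * l1 - linear) / quadratic,
                      0.0)
    accum[:] = accum_new
    return var
```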
diff --git a/tensorflow/core/api_def/base_api/api_def_ApplyFtrlV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_ApplyFtrlV2.pbtxt
new file mode 100644
index 0000000000..974f3adc19
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ApplyFtrlV2.pbtxt
@@ -0,0 +1,75 @@
+op {
+ graph_op_name: "ApplyFtrlV2"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "accum"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "linear"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+The gradient.
+END
+ }
+ in_arg {
+ name: "lr"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "l1"
+ description: <<END
+L1 regularization. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "l2"
+ description: <<END
+L2 shrinkage regularization. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "lr_power"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ out_arg {
+ name: "out"
+ description: <<END
+Same as "var".
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If `True`, updating of the var and accum tensors will be protected
+by a lock; otherwise the behavior is undefined, but may exhibit less
+contention.
+END
+ }
+ summary: "Update \'*var\' according to the Ftrl-proximal scheme."
+ description: <<END
+grad_with_shrinkage = grad + 2 * l2_shrinkage * var
+accum_new = accum + grad_with_shrinkage * grad_with_shrinkage
+linear += grad_with_shrinkage +
+ (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
+quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
+var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
+accum = accum_new
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ApplyGradientDescent.pbtxt b/tensorflow/core/api_def/base_api/api_def_ApplyGradientDescent.pbtxt
new file mode 100644
index 0000000000..2f38ebd1b8
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ApplyGradientDescent.pbtxt
@@ -0,0 +1,35 @@
+op {
+ graph_op_name: "ApplyGradientDescent"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "alpha"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "delta"
+ description: <<END
+The change.
+END
+ }
+ out_arg {
+ name: "out"
+ description: <<END
+Same as "var".
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If `True`, the subtraction will be protected by a lock;
+otherwise the behavior is undefined, but may exhibit less contention.
+END
+ }
+ summary: "Update \'*var\' by subtracting \'alpha\' * \'delta\' from it."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ApplyMomentum.pbtxt b/tensorflow/core/api_def/base_api/api_def_ApplyMomentum.pbtxt
new file mode 100644
index 0000000000..55326fd35c
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ApplyMomentum.pbtxt
@@ -0,0 +1,62 @@
+op {
+ graph_op_name: "ApplyMomentum"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "accum"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "lr"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+The gradient.
+END
+ }
+ in_arg {
+ name: "momentum"
+ description: <<END
+Momentum. Must be a scalar.
+END
+ }
+ out_arg {
+ name: "out"
+ description: <<END
+Same as "var".
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If `True`, updating of the var and accum tensors will be protected
+by a lock; otherwise the behavior is undefined, but may exhibit less
+contention.
+END
+ }
+ attr {
+ name: "use_nesterov"
+ description: <<END
+If `True`, the tensor passed to compute the gradient will be
+var - lr * momentum * accum, so in the end, the var you get is actually
+var - lr * momentum * accum (the Nesterov look-ahead value).
+END
+ }
+ summary: "Update \'*var\' according to the momentum scheme. Set use_nesterov = True if you"
+ description: <<END
+want to use Nesterov momentum.
+
+accum = accum * momentum + grad
+var -= lr * accum
+END
+}
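The update in NumPy, with the Nesterov branch written out under the look-ahead reading of the `use_nesterov` attr (this formulation is our interpretation of the attr text above, not taken from the kernel source):

```python
import numpy as np

def apply_momentum(var, accum, lr, grad, momentum, use_nesterov=False):
    accum[:] = accum * momentum + grad
    if use_nesterov:
        # Step using the look-ahead gradient combination.
        var[:] -= lr * (grad + momentum * accum)
    else:
        var[:] -= lr * accum
    return var
```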
diff --git a/tensorflow/core/api_def/base_api/api_def_ApplyProximalAdagrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_ApplyProximalAdagrad.pbtxt
new file mode 100644
index 0000000000..a683ba12a4
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ApplyProximalAdagrad.pbtxt
@@ -0,0 +1,58 @@
+op {
+ graph_op_name: "ApplyProximalAdagrad"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "accum"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "lr"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "l1"
+ description: <<END
+L1 regularization. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "l2"
+ description: <<END
+L2 regularization. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+The gradient.
+END
+ }
+ out_arg {
+ name: "out"
+ description: <<END
+Same as "var".
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If True, updating of the var and accum tensors will be protected by
+a lock; otherwise the behavior is undefined, but may exhibit less contention.
+END
+ }
+ summary: "Update \'*var\' and \'*accum\' according to FOBOS with Adagrad learning rate."
+ description: <<END
+accum += grad * grad
+prox_v = var - lr * grad * (1 / sqrt(accum))
+var = sign(prox_v) / (1 + lr * l2) * max{|prox_v| - lr * l1, 0}
+END
+}
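The equations above transcribed literally into NumPy (the soft-threshold in the last step is what makes this a FOBOS/proximal update; ApplyProximalGradientDescent below is the same with a fixed rate alpha in place of the Adagrad rate):

```python
import numpy as np

def apply_proximal_adagrad(var, accum, lr, l1, l2, grad):
    accum[:] += grad ** 2
    prox_v = var - lr * grad / np.sqrt(accum)
    # Soft-threshold by the l1 term, then shrink by the l2 term.
    var[:] = (np.sign(prox_v) / (1 + lr * l2)
              * np.maximum(np.abs(prox_v) - lr * l1, 0.0))
    return var
```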
diff --git a/tensorflow/core/api_def/base_api/api_def_ApplyProximalGradientDescent.pbtxt b/tensorflow/core/api_def/base_api/api_def_ApplyProximalGradientDescent.pbtxt
new file mode 100644
index 0000000000..7914c60b71
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ApplyProximalGradientDescent.pbtxt
@@ -0,0 +1,51 @@
+op {
+ graph_op_name: "ApplyProximalGradientDescent"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "alpha"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "l1"
+ description: <<END
+L1 regularization. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "l2"
+ description: <<END
+L2 regularization. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "delta"
+ description: <<END
+The change.
+END
+ }
+ out_arg {
+ name: "out"
+ description: <<END
+Same as "var".
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If True, the subtraction will be protected by a lock;
+otherwise the behavior is undefined, but may exhibit less contention.
+END
+ }
+ summary: "Update \'*var\' as FOBOS algorithm with fixed learning rate."
+ description: <<END
+prox_v = var - alpha * delta
+var = sign(prox_v) / (1 + alpha * l2) * max{|prox_v| - alpha * l1, 0}
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ApplyRMSProp.pbtxt b/tensorflow/core/api_def/base_api/api_def_ApplyRMSProp.pbtxt
new file mode 100644
index 0000000000..8ecf89c0f4
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ApplyRMSProp.pbtxt
@@ -0,0 +1,72 @@
+op {
+ graph_op_name: "ApplyRMSProp"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "ms"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "mom"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "lr"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "rho"
+ description: <<END
+Decay rate. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "epsilon"
+ description: <<END
+Ridge term. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+The gradient.
+END
+ }
+ out_arg {
+ name: "out"
+ description: <<END
+Same as "var".
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If `True`, updating of the var, ms, and mom tensors is protected
+by a lock; otherwise the behavior is undefined, but may exhibit less
+contention.
+END
+ }
+ summary: "Update \'*var\' according to the RMSProp algorithm."
+ description: <<END
+Note that in the dense implementation of this algorithm, ms and mom will
+update even if the grad is zero, but in the sparse implementation, ms
+and mom will not update in iterations during which the grad is zero.
+
+mean_square = decay * mean_square + (1-decay) * gradient ** 2
+Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
+
+ms <- rho * ms_{t-1} + (1-rho) * grad * grad
+mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
+var <- var - mom
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ApproximateEqual.pbtxt b/tensorflow/core/api_def/base_api/api_def_ApproximateEqual.pbtxt
new file mode 100644
index 0000000000..8842fa9bbc
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ApproximateEqual.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "ApproximateEqual"
+ summary: "Returns the truth value of abs(x-y) < tolerance element-wise."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ArgMax.pbtxt b/tensorflow/core/api_def/base_api/api_def_ArgMax.pbtxt
new file mode 100644
index 0000000000..0cc81d1c8b
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ArgMax.pbtxt
@@ -0,0 +1,15 @@
+op {
+ graph_op_name: "ArgMax"
+ in_arg {
+ name: "dimension"
+ description: <<END
+int32 or int64, must be in the range `[-rank(input), rank(input))`.
+Describes which dimension of the input Tensor to reduce across. For vectors,
+use dimension = 0.
+END
+ }
+ summary: "Returns the index with the largest value across dimensions of a tensor."
+ description: <<END
+Note that in case of ties the identity of the return value is not guaranteed.
+END
+}
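A quick example of the `dimension` argument (ArgMin below behaves identically, with the minimum in place of the maximum):

```python
import numpy as np

x = np.array([[1, 9, 3],
              [7, 2, 5]])

print(np.argmax(x, axis=0))  # [1 0 1]: row index of the max in each column
print(np.argmax(x, axis=1))  # [1 0]:   column index of the max in each row
```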
diff --git a/tensorflow/core/api_def/base_api/api_def_ArgMin.pbtxt b/tensorflow/core/api_def/base_api/api_def_ArgMin.pbtxt
new file mode 100644
index 0000000000..fb7410c5fd
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ArgMin.pbtxt
@@ -0,0 +1,15 @@
+op {
+ graph_op_name: "ArgMin"
+ in_arg {
+ name: "dimension"
+ description: <<END
+int32 or int64, must be in the range `[-rank(input), rank(input))`.
+Describes which dimension of the input Tensor to reduce across. For vectors,
+use dimension = 0.
+END
+ }
+ summary: "Returns the index with the smallest value across dimensions of a tensor."
+ description: <<END
+Note that in case of ties the identity of the return value is not guaranteed.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_AsString.pbtxt b/tensorflow/core/api_def/base_api/api_def_AsString.pbtxt
new file mode 100644
index 0000000000..5f2bca8eda
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_AsString.pbtxt
@@ -0,0 +1,42 @@
+op {
+ graph_op_name: "AsString"
+ attr {
+ name: "precision"
+ description: <<END
+The post-decimal precision to use for floating point numbers.
+Only used if precision > -1.
+END
+ }
+ attr {
+ name: "scientific"
+ description: <<END
+Use scientific notation for floating point numbers.
+END
+ }
+ attr {
+ name: "shortest"
+ description: <<END
+Use shortest representation (either scientific or standard) for
+floating point numbers.
+END
+ }
+ attr {
+ name: "width"
+ description: <<END
+Pad pre-decimal numbers to this width.
+Applies to both floating point and integer numbers.
+Only used if width > -1.
+END
+ }
+ attr {
+ name: "fill"
+ description: <<END
+The value to pad if width > -1. If empty, pads with spaces.
+Another typical value is '0'. The string cannot be longer than one character.
+END
+ }
+ summary: "Converts each entry in the given tensor to strings. Supports many numeric"
+ description: <<END
+types and boolean.
+END
+}
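Roughly how the formatting attrs compose, in printf-style terms (an illustration of the intent only, not the op's actual implementation):

```python
value = 3.14159
print("%0.2f" % value)   # precision=2           -> "3.14"
print("%e" % value)      # scientific=true       -> "3.141590e+00"
print("%05.1f" % value)  # width=5 with fill='0' -> "003.1"
```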
diff --git a/tensorflow/core/api_def/base_api/api_def_Asin.pbtxt b/tensorflow/core/api_def/base_api/api_def_Asin.pbtxt
new file mode 100644
index 0000000000..19e1b14421
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Asin.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Asin"
+ summary: "Computes asin of x element-wise."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Asinh.pbtxt b/tensorflow/core/api_def/base_api/api_def_Asinh.pbtxt
new file mode 100644
index 0000000000..20f4dab861
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Asinh.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Asinh"
+ summary: "Computes inverse hyperbolic sine of x element-wise."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Assert.pbtxt b/tensorflow/core/api_def/base_api/api_def_Assert.pbtxt
new file mode 100644
index 0000000000..90e5df8149
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Assert.pbtxt
@@ -0,0 +1,26 @@
+op {
+ graph_op_name: "Assert"
+ in_arg {
+ name: "condition"
+ description: <<END
+The condition to evaluate.
+END
+ }
+ in_arg {
+ name: "data"
+ description: <<END
+The tensors to print out when condition is false.
+END
+ }
+ attr {
+ name: "summarize"
+ description: <<END
+Print this many entries of each tensor.
+END
+ }
+ summary: "Asserts that the given condition is true."
+ description: <<END
+If `condition` evaluates to false, print the list of tensors in `data`.
+`summarize` determines how many entries of the tensors to print.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Assign.pbtxt b/tensorflow/core/api_def/base_api/api_def_Assign.pbtxt
new file mode 100644
index 0000000000..4ae9b49f49
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Assign.pbtxt
@@ -0,0 +1,42 @@
+op {
+ graph_op_name: "Assign"
+ in_arg {
+ name: "ref"
+ description: <<END
+Should be from a `Variable` node. May be uninitialized.
+END
+ }
+ in_arg {
+ name: "value"
+ description: <<END
+The value to be assigned to the variable.
+END
+ }
+ out_arg {
+ name: "output_ref"
+ description: <<END
+Same as "ref". Returned as a convenience for operations that want
+to use the new value after the variable has been reset.
+END
+ }
+ attr {
+ name: "validate_shape"
+ description: <<END
+If true, the operation will validate that the shape
+of 'value' matches the shape of the Tensor being assigned to. If false,
+'ref' will take on the shape of 'value'.
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If True, the assignment will be protected by a lock;
+otherwise the behavior is undefined, but may exhibit less contention.
+END
+ }
+ summary: "Update \'ref\' by assigning \'value\' to it."
+ description: <<END
+This operation outputs "ref" after the assignment is done.
+This makes it easier to chain operations that need to use the reset value.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_AssignAdd.pbtxt b/tensorflow/core/api_def/base_api/api_def_AssignAdd.pbtxt
new file mode 100644
index 0000000000..d09ec5e196
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_AssignAdd.pbtxt
@@ -0,0 +1,34 @@
+op {
+ graph_op_name: "AssignAdd"
+ in_arg {
+ name: "ref"
+ description: <<END
+Should be from a `Variable` node.
+END
+ }
+ in_arg {
+ name: "value"
+ description: <<END
+The value to be added to the variable.
+END
+ }
+ out_arg {
+ name: "output_ref"
+ description: <<END
+Same as "ref". Returned as a convenience for operations that want
+to use the new value after the variable has been updated.
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If True, the addition will be protected by a lock;
+otherwise the behavior is undefined, but may exhibit less contention.
+END
+ }
+ summary: "Update \'ref\' by adding \'value\' to it."
+ description: <<END
+This operation outputs "ref" after the update is done.
+This makes it easier to chain operations that need to use the reset value.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_AssignAddVariableOp.pbtxt b/tensorflow/core/api_def/base_api/api_def_AssignAddVariableOp.pbtxt
new file mode 100644
index 0000000000..5d21d7bab6
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_AssignAddVariableOp.pbtxt
@@ -0,0 +1,29 @@
+op {
+ graph_op_name: "AssignAddVariableOp"
+ in_arg {
+ name: "resource"
+ description: <<END
+handle to the resource in which to store the variable.
+END
+ }
+ in_arg {
+ name: "value"
+ description: <<END
+the value by which the variable will be incremented.
+END
+ }
+ attr {
+ name: "dtype"
+ description: <<END
+the dtype of the value.
+END
+ }
+ summary: "Adds a value to the current value of a variable."
+ description: <<END
+Any ReadVariableOp which depends directly or indirectly on this assign is
+guaranteed to see the incremented value or a subsequent newer one.
+
+Outputs the incremented value, which can be used to totally order the
+increments to this variable.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_AssignSub.pbtxt b/tensorflow/core/api_def/base_api/api_def_AssignSub.pbtxt
new file mode 100644
index 0000000000..191a5c34fe
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_AssignSub.pbtxt
@@ -0,0 +1,34 @@
+op {
+ graph_op_name: "AssignSub"
+ in_arg {
+ name: "ref"
+ description: <<END
+Should be from a `Variable` node.
+END
+ }
+ in_arg {
+ name: "value"
+ description: <<END
+The value to be subtracted from the variable.
+END
+ }
+ out_arg {
+ name: "output_ref"
+ description: <<END
+Same as "ref". Returned as a convenience for operations that want
+to use the new value after the variable has been updated.
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If True, the subtraction will be protected by a lock;
+otherwise the behavior is undefined, but may exhibit less contention.
+END
+ }
+ summary: "Update \'ref\' by subtracting \'value\' from it."
+ description: <<END
+This operation outputs "ref" after the update is done.
+This makes it easier to chain operations that need to use the reset value.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_AssignSubVariableOp.pbtxt b/tensorflow/core/api_def/base_api/api_def_AssignSubVariableOp.pbtxt
new file mode 100644
index 0000000000..102201c4cb
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_AssignSubVariableOp.pbtxt
@@ -0,0 +1,29 @@
+op {
+ graph_op_name: "AssignSubVariableOp"
+ in_arg {
+ name: "resource"
+ description: <<END
+handle to the resource in which to store the variable.
+END
+ }
+ in_arg {
+ name: "value"
+ description: <<END
+the value by which the variable will be decremented.
+END
+ }
+ attr {
+ name: "dtype"
+ description: <<END
+the dtype of the value.
+END
+ }
+ summary: "Subtracts a value from the current value of a variable."
+ description: <<END
+Any ReadVariableOp which depends directly or indirectly on this assign is
+guaranteed to see the decremented value or a subsequent newer one.
+
+Outputs the decremented value, which can be used to totally order the
+decrements to this variable.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_AssignVariableOp.pbtxt b/tensorflow/core/api_def/base_api/api_def_AssignVariableOp.pbtxt
new file mode 100644
index 0000000000..d6fe81d573
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_AssignVariableOp.pbtxt
@@ -0,0 +1,26 @@
+op {
+ graph_op_name: "AssignVariableOp"
+ in_arg {
+ name: "resource"
+ description: <<END
+handle to the resource in which to store the variable.
+END
+ }
+ in_arg {
+ name: "value"
+ description: <<END
+the value to assign to the variable.
+END
+ }
+ attr {
+ name: "dtype"
+ description: <<END
+the dtype of the value.
+END
+ }
+ summary: "Assigns a new value to a variable."
+ description: <<END
+Any ReadVariableOp with a control dependency on this op is guaranteed to return
+this value or a subsequent newer value of the variable.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Atan.pbtxt b/tensorflow/core/api_def/base_api/api_def_Atan.pbtxt
new file mode 100644
index 0000000000..557cf183e4
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Atan.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Atan"
+ summary: "Computes atan of x element-wise."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Atan2.pbtxt b/tensorflow/core/api_def/base_api/api_def_Atan2.pbtxt
new file mode 100644
index 0000000000..d2c8ef5939
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Atan2.pbtxt
@@ -0,0 +1,11 @@
+op {
+ graph_op_name: "Atan2"
+ summary: "Computes arctangent of `y/x` element-wise, respecting signs of the arguments."
+ description: <<END
+This is the angle \( \theta \in [-\pi, \pi] \) such that
+\[ x = r \cos(\theta) \]
+and
+\[ y = r \sin(\theta) \]
+where \(r = \sqrt{x^2 + y^2}\).
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Atanh.pbtxt b/tensorflow/core/api_def/base_api/api_def_Atanh.pbtxt
new file mode 100644
index 0000000000..0ef1180f3d
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Atanh.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Atanh"
+ summary: "Computes inverse hyperbolic tangent of x element-wise."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_AudioSpectrogram.pbtxt b/tensorflow/core/api_def/base_api/api_def_AudioSpectrogram.pbtxt
new file mode 100644
index 0000000000..6631f4e04c
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_AudioSpectrogram.pbtxt
@@ -0,0 +1,63 @@
+op {
+ graph_op_name: "AudioSpectrogram"
+ in_arg {
+ name: "input"
+ description: <<END
+Float representation of audio data.
+END
+ }
+ out_arg {
+ name: "spectrogram"
+ description: <<END
+3D representation of the audio frequencies as an image.
+END
+ }
+ attr {
+ name: "window_size"
+ description: <<END
+How wide the input window is in samples. For the highest efficiency
+this should be a power of two, but other values are accepted.
+END
+ }
+ attr {
+ name: "stride"
+ description: <<END
+How far apart the centers of adjacent sample windows should be.
+END
+ }
+ attr {
+ name: "magnitude_squared"
+ description: <<END
+Whether to return the squared magnitude or just the
+magnitude. Using squared magnitude can avoid extra calculations.
+END
+ }
+ summary: "Produces a visualization of audio data over time."
+ description: <<END
+Spectrograms are a standard way of representing audio information as a series of
+slices of frequency information, one slice for each window of time. By joining
+these together into a sequence, they form a distinctive fingerprint of the sound
+over time.
+
+This op expects to receive audio data as an input, stored as floats in the range
+-1 to 1, together with a window width in samples, and a stride specifying how
+far to move the window between slices. From this it generates a three
+dimensional output. The lowest dimension has an amplitude value for each
+frequency during that time slice. The next dimension is time, with successive
+frequency slices. The final dimension is for the channels in the input, so a
+stereo audio input would have two here for example.
+
+This means the layout when converted and saved as an image is rotated 90 degrees
+clockwise from a typical spectrogram. Time is descending down the Y axis, and
+the frequency decreases from left to right.
+
+Each value in the result is the magnitude of an FFT on the current window of
+samples, that is, the square root of the sum of the squares of the real and
+imaginary parts. In this way, the lowest dimension represents the power of each
+frequency in the current window, and adjacent windows are concatenated in the next dimension.
+
+To get a more intuitive and visual look at what this operation does, you can run
+tensorflow/examples/wav_to_spectrogram to read in an audio file and save out the
+resulting spectrogram as a PNG image.
+END
+}
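A minimal single-channel sketch of the slicing and FFT layout described above (the windowing choice here is an assumption; the op's exact window function may differ):

```python
import numpy as np

def spectrogram(samples, window_size, stride, magnitude_squared=False):
    window = np.hanning(window_size)  # assumed window; the op may use another
    slices = []
    for start in range(0, len(samples) - window_size + 1, stride):
        frame = samples[start:start + window_size] * window
        spectrum = np.abs(np.fft.rfft(frame))  # sqrt(re^2 + im^2) per bin
        slices.append(spectrum ** 2 if magnitude_squared else spectrum)
    # One row per time slice, one column per frequency bin.
    return np.array(slices)

audio = np.sin(2 * np.pi * 440 * np.arange(16000) / 16000.0)
print(spectrogram(audio, window_size=512, stride=256).shape)  # (61, 257)
```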
diff --git a/tensorflow/core/api_def/base_api/api_def_AudioSummary.pbtxt b/tensorflow/core/api_def/base_api/api_def_AudioSummary.pbtxt
new file mode 100644
index 0000000000..3bc70d7ce8
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_AudioSummary.pbtxt
@@ -0,0 +1,47 @@
+op {
+ graph_op_name: "AudioSummary"
+ in_arg {
+ name: "tag"
+ description: <<END
+Scalar. Used to build the `tag` attribute of the summary values.
+END
+ }
+ in_arg {
+ name: "tensor"
+ description: <<END
+2-D of shape `[batch_size, frames]`.
+END
+ }
+ out_arg {
+ name: "summary"
+ description: <<END
+Scalar. Serialized `Summary` protocol buffer.
+END
+ }
+ attr {
+ name: "sample_rate"
+ description: <<END
+The sample rate of the signal in hertz.
+END
+ }
+ attr {
+ name: "max_outputs"
+ description: <<END
+Max number of batch elements to generate audio for.
+END
+ }
+ summary: "Outputs a `Summary` protocol buffer with audio."
+ description: <<END
+The summary has up to `max_outputs` summary values containing audio. The
+audio is built from `tensor` which must be 3-D with shape `[batch_size,
+frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
+assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.
+
+The `tag` argument is a scalar `Tensor` of type `string`. It is used to
+build the `tag` of the summary values:
+
+* If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
+* If `max_outputs` is greater than 1, the summary value tags are
+ generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_AudioSummaryV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_AudioSummaryV2.pbtxt
new file mode 100644
index 0000000000..d406f22d35
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_AudioSummaryV2.pbtxt
@@ -0,0 +1,50 @@
+op {
+ graph_op_name: "AudioSummaryV2"
+ endpoint {
+ name: "AudioSummary"
+ }
+ in_arg {
+ name: "tag"
+ description: <<END
+Scalar. Used to build the `tag` attribute of the summary values.
+END
+ }
+ in_arg {
+ name: "tensor"
+ description: <<END
+2-D of shape `[batch_size, frames]`.
+END
+ }
+ in_arg {
+ name: "sample_rate"
+ description: <<END
+The sample rate of the signal in hertz.
+END
+ }
+ out_arg {
+ name: "summary"
+ description: <<END
+Scalar. Serialized `Summary` protocol buffer.
+END
+ }
+ attr {
+ name: "max_outputs"
+ description: <<END
+Max number of batch elements to generate audio for.
+END
+ }
+ summary: "Outputs a `Summary` protocol buffer with audio."
+ description: <<END
+The summary has up to `max_outputs` summary values containing audio. The
+audio is built from `tensor` which must be 3-D with shape `[batch_size,
+frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
+assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.
+
+The `tag` argument is a scalar `Tensor` of type `string`. It is used to
+build the `tag` of the summary values:
+
+* If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
+* If `max_outputs` is greater than 1, the summary value tags are
+ generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_AvgPool.pbtxt b/tensorflow/core/api_def/base_api/api_def_AvgPool.pbtxt
new file mode 100644
index 0000000000..1d94662f6a
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_AvgPool.pbtxt
@@ -0,0 +1,48 @@
+op {
+ graph_op_name: "AvgPool"
+ in_arg {
+ name: "value"
+ description: <<END
+4-D with shape `[batch, height, width, channels]`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+The average pooled output tensor.
+END
+ }
+ attr {
+ name: "ksize"
+ description: <<END
+The size of the sliding window for each dimension of `value`.
+END
+ }
+ attr {
+ name: "strides"
+ description: <<END
+The stride of the sliding window for each dimension of `value`.
+END
+ }
+ attr {
+ name: "padding"
+ description: <<END
+The type of padding algorithm to use.
+END
+ }
+ attr {
+ name: "data_format"
+ description: <<END
+Specify the data format of the input and output data. With the
+default format "NHWC", the data is stored in the order of:
+ [batch, in_height, in_width, in_channels].
+Alternatively, the format could be "NCHW", the data storage order of:
+ [batch, in_channels, in_height, in_width].
+END
+ }
+ summary: "Performs average pooling on the input."
+ description: <<END
+Each entry in `output` is the mean of the corresponding size `ksize`
+window in `value`.
+END
+}
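A NumPy sketch of the windowed mean for the NHWC layout (VALID padding only, to keep the illustration short):

```python
import numpy as np

def avg_pool(value, ksize, strides):
    n, h, w, c = value.shape
    (kh, kw), (sh, sw) = ksize, strides
    out_h, out_w = (h - kh) // sh + 1, (w - kw) // sw + 1
    out = np.empty((n, out_h, out_w, c), dtype=value.dtype)
    for i in range(out_h):
        for j in range(out_w):
            window = value[:, i * sh:i * sh + kh, j * sw:j * sw + kw, :]
            out[:, i, j, :] = window.mean(axis=(1, 2))  # mean of each window
    return out

x = np.arange(16, dtype=np.float32).reshape(1, 4, 4, 1)
print(avg_pool(x, ksize=(2, 2), strides=(2, 2)).ravel())  # [ 2.5  4.5 10.5 12.5]
```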
diff --git a/tensorflow/core/api_def/base_api/api_def_AvgPool3D.pbtxt b/tensorflow/core/api_def/base_api/api_def_AvgPool3D.pbtxt
new file mode 100644
index 0000000000..8171566a21
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_AvgPool3D.pbtxt
@@ -0,0 +1,46 @@
+op {
+ graph_op_name: "AvgPool3D"
+ in_arg {
+ name: "input"
+ description: <<END
+Shape `[batch, depth, rows, cols, channels]` tensor to pool over.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+The average pooled output tensor.
+END
+ }
+ attr {
+ name: "ksize"
+ description: <<END
+1-D tensor of length 5. The size of the window for each dimension of
+the input tensor. Must have `ksize[0] = ksize[4] = 1`.
+END
+ }
+ attr {
+ name: "strides"
+ description: <<END
+1-D tensor of length 5. The stride of the sliding window for each
+dimension of `input`. Must have `strides[0] = strides[4] = 1`.
+END
+ }
+ attr {
+ name: "padding"
+ description: <<END
+The type of padding algorithm to use.
+END
+ }
+ attr {
+ name: "data_format"
+ description: <<END
+The data format of the input and output data. With the
+default format "NDHWC", the data is stored in the order of:
+ [batch, in_depth, in_height, in_width, in_channels].
+Alternatively, the format could be "NCDHW", the data storage order is:
+ [batch, in_channels, in_depth, in_height, in_width].
+END
+ }
+ summary: "Performs 3D average pooling on the input."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_AvgPool3DGrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_AvgPool3DGrad.pbtxt
new file mode 100644
index 0000000000..6f96be4873
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_AvgPool3DGrad.pbtxt
@@ -0,0 +1,52 @@
+op {
+ graph_op_name: "AvgPool3DGrad"
+ in_arg {
+ name: "orig_input_shape"
+ description: <<END
+The original input dimensions.
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+Output backprop of shape `[batch, depth, rows, cols, channels]`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+The backprop for input.
+END
+ }
+ attr {
+ name: "ksize"
+ description: <<END
+1-D tensor of length 5. The size of the window for each dimension of
+the input tensor. Must have `ksize[0] = ksize[4] = 1`.
+END
+ }
+ attr {
+ name: "strides"
+ description: <<END
+1-D tensor of length 5. The stride of the sliding window for each
+dimension of `input`. Must have `strides[0] = strides[4] = 1`.
+END
+ }
+ attr {
+ name: "padding"
+ description: <<END
+The type of padding algorithm to use.
+END
+ }
+ attr {
+ name: "data_format"
+ description: <<END
+The data format of the input and output data. With the
+default format "NDHWC", the data is stored in the order of:
+ [batch, in_depth, in_height, in_width, in_channels].
+Alternatively, the format could be "NCDHW", the data storage order is:
+ [batch, in_channels, in_depth, in_height, in_width].
+END
+ }
+ summary: "Computes gradients of average pooling function."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_AvgPoolGrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_AvgPoolGrad.pbtxt
new file mode 100644
index 0000000000..84e77f3ced
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_AvgPoolGrad.pbtxt
@@ -0,0 +1,52 @@
+op {
+ graph_op_name: "AvgPoolGrad"
+ visibility: HIDDEN
+ in_arg {
+ name: "orig_input_shape"
+ description: <<END
+1-D. Shape of the original input to `avg_pool`.
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+4-D with shape `[batch, height, width, channels]`. Gradients w.r.t.
+the output of `avg_pool`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+4-D. Gradients w.r.t. the input of `avg_pool`.
+END
+ }
+ attr {
+ name: "ksize"
+ description: <<END
+The size of the sliding window for each dimension of the input.
+END
+ }
+ attr {
+ name: "strides"
+ description: <<END
+The stride of the sliding window for each dimension of the input.
+END
+ }
+ attr {
+ name: "padding"
+ description: <<END
+The type of padding algorithm to use.
+END
+ }
+ attr {
+ name: "data_format"
+ description: <<END
+Specify the data format of the input and output data. With the
+default format "NHWC", the data is stored in the order of:
+ [batch, in_height, in_width, in_channels].
+Alternatively, the format could be "NCHW", the data storage order of:
+ [batch, in_channels, in_height, in_width].
+END
+ }
+ summary: "Computes gradients of the average pooling function."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_B.pbtxt b/tensorflow/core/api_def/base_api/api_def_B.pbtxt
deleted file mode 100644
index 716d397f9a..0000000000
--- a/tensorflow/core/api_def/base_api/api_def_B.pbtxt
+++ /dev/null
@@ -1,448 +0,0 @@
-op {
- graph_op_name: "Barrier"
- endpoint {
- name: "Barrier"
- }
- summary: "Defines a barrier that persists across different graph executions."
- description: <<END
-A barrier represents a key-value map, where each key is a string, and
-each value is a tuple of tensors.
-
-At runtime, the barrier contains 'complete' and 'incomplete'
-elements. A complete element has defined tensors for all components of
-its value tuple, and may be accessed using BarrierTakeMany. An
-incomplete element has some undefined components in its value tuple,
-and may be updated using BarrierInsertMany.
-END
-}
-op {
- graph_op_name: "BarrierClose"
- endpoint {
- name: "BarrierClose"
- }
- summary: "Closes the given barrier."
- description: <<END
-This operation signals that no more new elements will be inserted in the
-given barrier. Subsequent InsertMany that try to introduce a new key will fail.
-Subsequent InsertMany operations that just add missing components to already
-existing elements will continue to succeed. Subsequent TakeMany operations will
-continue to succeed if sufficient completed elements remain in the barrier.
-Subsequent TakeMany operations that would block will fail immediately.
-END
-}
-op {
- graph_op_name: "BarrierIncompleteSize"
- endpoint {
- name: "BarrierIncompleteSize"
- }
- summary: "Computes the number of incomplete elements in the given barrier."
-}
-op {
- graph_op_name: "BarrierInsertMany"
- endpoint {
- name: "BarrierInsertMany"
- }
- summary: "For each key, assigns the respective value to the specified component."
- description: <<END
-If a key is not found in the barrier, this operation will create a new
-incomplete element. If a key is found in the barrier, and the element
-already has a value at component_index, this operation will fail with
-INVALID_ARGUMENT, and leave the barrier in an undefined state.
-END
-}
-op {
- graph_op_name: "BarrierReadySize"
- endpoint {
- name: "BarrierReadySize"
- }
- summary: "Computes the number of complete elements in the given barrier."
-}
-op {
- graph_op_name: "BarrierTakeMany"
- endpoint {
- name: "BarrierTakeMany"
- }
- summary: "Takes the given number of completed elements from a barrier."
- description: <<END
-This operation concatenates completed-element component tensors along
-the 0th dimension to make a single component tensor.
-
-Elements come out of the barrier when they are complete, and in the order
-in which they were placed into the barrier. The indices output provides
-information about the batch in which each element was originally inserted
-into the barrier.
-END
-}
-op {
- graph_op_name: "BatchCholesky"
- endpoint {
- name: "BatchCholesky"
- }
-}
-op {
- graph_op_name: "BatchCholeskyGrad"
- endpoint {
- name: "BatchCholeskyGrad"
- }
-}
-op {
- graph_op_name: "BatchDataset"
- endpoint {
- name: "BatchDataset"
- }
- summary: "Creates a dataset that batches `batch_size` elements from `input_dataset`."
-}
-op {
- graph_op_name: "BatchFFT"
- endpoint {
- name: "BatchFFT"
- }
-}
-op {
- graph_op_name: "BatchFFT2D"
- endpoint {
- name: "BatchFFT2D"
- }
-}
-op {
- graph_op_name: "BatchFFT3D"
- endpoint {
- name: "BatchFFT3D"
- }
-}
-op {
- graph_op_name: "BatchIFFT"
- endpoint {
- name: "BatchIFFT"
- }
-}
-op {
- graph_op_name: "BatchIFFT2D"
- endpoint {
- name: "BatchIFFT2D"
- }
-}
-op {
- graph_op_name: "BatchIFFT3D"
- endpoint {
- name: "BatchIFFT3D"
- }
-}
-op {
- graph_op_name: "BatchMatMul"
- endpoint {
- name: "BatchMatMul"
- }
- summary: "Multiplies slices of two tensors in batches."
- description: <<END
-Multiplies all slices of `Tensor` `x` and `y` (each slice can be
-viewed as an element of a batch), and arranges the individual results
-in a single output tensor of the same batch size. Each of the
-individual slices can optionally be adjointed (to adjoint a matrix
-means to transpose and conjugate it) before multiplication by setting
-the `adj_x` or `adj_y` flag to `True`, which are by default `False`.
-
-The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]`
-and `[..., r_y, c_y]`.
-
-The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where:
-
- r_o = c_x if adj_x else r_x
- c_o = r_y if adj_y else c_y
-
-It is computed as:
-
- output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])
-END
-}
-op {
- graph_op_name: "BatchMatrixBandPart"
- endpoint {
- name: "BatchMatrixBandPart"
- }
-}
-op {
- graph_op_name: "BatchMatrixDeterminant"
- endpoint {
- name: "BatchMatrixDeterminant"
- }
-}
-op {
- graph_op_name: "BatchMatrixDiag"
- endpoint {
- name: "BatchMatrixDiag"
- }
-}
-op {
- graph_op_name: "BatchMatrixDiagPart"
- endpoint {
- name: "BatchMatrixDiagPart"
- }
-}
-op {
- graph_op_name: "BatchMatrixInverse"
- endpoint {
- name: "BatchMatrixInverse"
- }
-}
-op {
- graph_op_name: "BatchMatrixSetDiag"
- endpoint {
- name: "BatchMatrixSetDiag"
- }
-}
-op {
- graph_op_name: "BatchMatrixSolve"
- endpoint {
- name: "BatchMatrixSolve"
- }
-}
-op {
- graph_op_name: "BatchMatrixSolveLs"
- endpoint {
- name: "BatchMatrixSolveLs"
- }
-}
-op {
- graph_op_name: "BatchMatrixTriangularSolve"
- endpoint {
- name: "BatchMatrixTriangularSolve"
- }
-}
-op {
- graph_op_name: "BatchNormWithGlobalNormalization"
- endpoint {
- name: "BatchNormWithGlobalNormalization"
- }
- summary: "Batch normalization."
- description: <<END
-This op is deprecated. Prefer `tf.nn.batch_normalization`.
-END
-}
-op {
- graph_op_name: "BatchNormWithGlobalNormalizationGrad"
- endpoint {
- name: "BatchNormWithGlobalNormalizationGrad"
- }
- summary: "Gradients for batch normalization."
- description: <<END
-This op is deprecated. See `tf.nn.batch_normalization`.
-END
-}
-op {
- graph_op_name: "BatchSelfAdjointEig"
- endpoint {
- name: "BatchSelfAdjointEig"
- }
-}
-op {
- graph_op_name: "BatchSelfAdjointEigV2"
- endpoint {
- name: "BatchSelfAdjointEigV2"
- }
-}
-op {
- graph_op_name: "BatchSvd"
- endpoint {
- name: "BatchSvd"
- }
-}
-op {
- graph_op_name: "BatchToSpace"
- endpoint {
- name: "BatchToSpace"
- }
- summary: "BatchToSpace for 4-D tensors of type T."
- description: <<END
-This is a legacy version of the more general BatchToSpaceND.
-
-Rearranges (permutes) data from batch into blocks of spatial data, followed by
-cropping. This is the reverse transformation of SpaceToBatch. More specifically,
-this op outputs a copy of the input tensor where values from the `batch`
-dimension are moved in spatial blocks to the `height` and `width` dimensions,
-followed by cropping along the `height` and `width` dimensions.
-END
-}
-op {
- graph_op_name: "BatchToSpaceND"
- endpoint {
- name: "BatchToSpaceND"
- }
- summary: "BatchToSpace for N-D tensors of type T."
- description: <<END
-This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of shape
-`block_shape + [batch]`, interleaves these blocks back into the grid defined by
-the spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as
-the input. The spatial dimensions of this intermediate result are then
-optionally cropped according to `crops` to produce the output. This is the
-reverse of SpaceToBatch. See below for a precise description.
-END
-}
-op {
- graph_op_name: "Betainc"
- endpoint {
- name: "Betainc"
- }
- summary: "Compute the regularized incomplete beta integral \\\\(I_x(a, b)\\\\)."
- description: <<END
-The regularized incomplete beta integral is defined as:
-
-
-\\(I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}\\)
-
-where
-
-
-\\(B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt\\)
-
-
-is the incomplete beta function and \\(B(a, b)\\) is the *complete*
-beta function.
-END
-}
-op {
- graph_op_name: "BiasAdd"
- endpoint {
- name: "BiasAdd"
- }
- summary: "Adds `bias` to `value`."
- description: <<END
-This is a special case of `tf.add` where `bias` is restricted to be 1-D.
-Broadcasting is supported, so `value` may have any number of dimensions.
-END
-}
-op {
- graph_op_name: "BiasAddGrad"
- endpoint {
- name: "BiasAddGrad"
- }
- summary: "The backward operation for \"BiasAdd\" on the \"bias\" tensor."
- description: <<END
-It accumulates all the values from out_backprop into the feature dimension.
-For NHWC data format, the feature dimension is the last. For NCHW data format,
-the feature dimension is the third-to-last.
-END
-}
-op {
- graph_op_name: "BiasAddV1"
- endpoint {
- name: "BiasAddV1"
- }
- summary: "Adds `bias` to `value`."
- description: <<END
-This is a deprecated version of BiasAdd and will be soon removed.
-
-This is a special case of `tf.add` where `bias` is restricted to be 1-D.
-Broadcasting is supported, so `value` may have any number of dimensions.
-END
-}
-op {
- graph_op_name: "Bincount"
- endpoint {
- name: "Bincount"
- }
- summary: "Counts the number of occurrences of each value in an integer array."
- description: <<END
-Outputs a vector with length `size` and the same dtype as `weights`. If
-`weights` are empty, then index `i` stores the number of times the value `i` is
-counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of
-the value in `weights` at each index where the corresponding value in `arr` is
-`i`.
-
-Values in `arr` outside of the range [0, size) are ignored.
-END
-}
-op {
- graph_op_name: "Bitcast"
- endpoint {
- name: "Bitcast"
- }
- summary: "Bitcasts a tensor from one type to another without copying data."
- description: <<END
-Given a tensor `input`, this operation returns a tensor that has the same buffer
-data as `input` with datatype `type`.
-
-If the input datatype `T` is larger than the output datatype `type` then the
-shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)].
-
-If `T` is smaller than `type`, the operator requires that the rightmost
-dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from
-[..., sizeof(`type`)/sizeof(`T`)] to [...].
-
-*NOTE*: Bitcast is implemented as a low-level cast, so machines with different
-endian orderings will give different results.
-END
-}
-op {
- graph_op_name: "BitwiseAnd"
- endpoint {
- name: "BitwiseAnd"
- }
- summary: "Elementwise computes the bitwise AND of `x` and `y`."
- description: <<END
-The result will have those bits set, that are set in both `x` and `y`. The
-computation is performed on the underlying representations of `x` and `y`.
-END
-}
-op {
- graph_op_name: "BitwiseOr"
- endpoint {
- name: "BitwiseOr"
- }
- summary: "Elementwise computes the bitwise OR of `x` and `y`."
- description: <<END
-The result will have those bits set, that are set in `x`, `y` or both. The
-computation is performed on the underlying representations of `x` and `y`.
-END
-}
-op {
- graph_op_name: "BitwiseXor"
- endpoint {
- name: "BitwiseXor"
- }
- summary: "Elementwise computes the bitwise XOR of `x` and `y`."
- description: <<END
-The result will have those bits set, that are different in `x` and `y`. The
-computation is performed on the underlying representations of `x` and `y`.
-END
-}
-op {
- graph_op_name: "BroadcastArgs"
- endpoint {
- name: "BroadcastArgs"
- }
- summary: "Return the shape of s0 op s1 with broadcast."
- description: <<END
-Given `s0` and `s1`, tensors that represent shapes, compute `r0`, the
-broadcasted shape. `s0`, `s1` and `r0` are all integer vectors.
-END
-}
-op {
- graph_op_name: "BroadcastGradientArgs"
- endpoint {
- name: "BroadcastGradientArgs"
- }
- summary: "Return the reduction indices for computing gradients of s0 op s1 with broadcast."
- description: <<END
-This is typically used by gradient computations for a broadcasting operation.
-END
-}
-op {
- graph_op_name: "Bucketize"
- endpoint {
- name: "Bucketize"
- }
- summary: "Bucketizes \'input\' based on \'boundaries\'."
- description: <<END
-For example, if the inputs are
- boundaries = [0, 10, 100]
- input = [[-5, 10000]
- [150, 10]
- [5, 100]]
-
-then the output will be
- output = [[0, 3]
- [3, 2]
- [1, 3]]
-END
-}
diff --git a/tensorflow/core/api_def/base_api/api_def_Barrier.pbtxt b/tensorflow/core/api_def/base_api/api_def_Barrier.pbtxt
new file mode 100644
index 0000000000..3422ebf2f6
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Barrier.pbtxt
@@ -0,0 +1,55 @@
+op {
+ graph_op_name: "Barrier"
+ out_arg {
+ name: "handle"
+ description: <<END
+The handle to the barrier.
+END
+ }
+ attr {
+ name: "component_types"
+ description: <<END
+The type of each component in a value.
+END
+ }
+ attr {
+ name: "shapes"
+ description: <<END
+The shape of each component in a value. Each shape must be 1 in the
+first dimension. The length of this attr must be the same as the length of
+component_types.
+END
+ }
+ attr {
+ name: "capacity"
+ description: <<END
+The capacity of the barrier. The default capacity is MAX_INT32,
+which is the largest capacity of the underlying queue.
+END
+ }
+ attr {
+ name: "container"
+ description: <<END
+If non-empty, this barrier is placed in the given container.
+Otherwise, a default container is used.
+END
+ }
+ attr {
+ name: "shared_name"
+ description: <<END
+If non-empty, this barrier will be shared under the given name
+across multiple sessions.
+END
+ }
+ summary: "Defines a barrier that persists across different graph executions."
+ description: <<END
+A barrier represents a key-value map, where each key is a string, and
+each value is a tuple of tensors.
+
+At runtime, the barrier contains 'complete' and 'incomplete'
+elements. A complete element has defined tensors for all components of
+its value tuple, and may be accessed using BarrierTakeMany. An
+incomplete element has some undefined components in its value tuple,
+and may be updated using BarrierInsertMany.
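+
+As a rough usage sketch from Python (an assumption for illustration: the
+`Barrier` wrapper in `tensorflow.python.ops.data_flow_ops`, which is not
+part of the stable public API, drives these kernels):
+
+```python
+import tensorflow as tf
+from tensorflow.python.ops import data_flow_ops
+
+# A barrier whose values are tuples with one scalar float component.
+b = data_flow_ops.Barrier((tf.float32,), shapes=((),))
+# Assign component 0 for two keys; both elements become complete.
+insert_op = b.insert_many(0, keys=["k0", "k1"], values=[3.0, 4.0])
+# Take both completed elements back out, in insertion order.
+indices, keys, values = b.take_many(2)
+
+with tf.Session() as sess:
+  sess.run(insert_op)
+  print(sess.run([keys, values[0]]))  # [b'k0', b'k1'], [3.0, 4.0]
+```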
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_BarrierClose.pbtxt b/tensorflow/core/api_def/base_api/api_def_BarrierClose.pbtxt
new file mode 100644
index 0000000000..a81235ce8a
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_BarrierClose.pbtxt
@@ -0,0 +1,26 @@
+op {
+ graph_op_name: "BarrierClose"
+ in_arg {
+ name: "handle"
+ description: <<END
+The handle to a barrier.
+END
+ }
+ attr {
+ name: "cancel_pending_enqueues"
+ description: <<END
+If true, all pending enqueue requests that are
+blocked on the barrier's queue will be canceled. InsertMany will fail, even
+if no new key is introduced.
+END
+ }
+ summary: "Closes the given barrier."
+ description: <<END
+This operation signals that no more new elements will be inserted in the
+given barrier. Subsequent InsertMany that try to introduce a new key will fail.
+Subsequent InsertMany operations that just add missing components to already
+existing elements will continue to succeed. Subsequent TakeMany operations will
+continue to succeed if sufficient completed elements remain in the barrier.
+Subsequent TakeMany operations that would block will fail immediately.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_BarrierIncompleteSize.pbtxt b/tensorflow/core/api_def/base_api/api_def_BarrierIncompleteSize.pbtxt
new file mode 100644
index 0000000000..61f41da77f
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_BarrierIncompleteSize.pbtxt
@@ -0,0 +1,17 @@
+op {
+ graph_op_name: "BarrierIncompleteSize"
+ in_arg {
+ name: "handle"
+ description: <<END
+The handle to a barrier.
+END
+ }
+ out_arg {
+ name: "size"
+ description: <<END
+The number of incomplete elements (i.e. those with some of their value
+components not set) in the barrier.
+END
+ }
+ summary: "Computes the number of incomplete elements in the given barrier."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_BarrierInsertMany.pbtxt b/tensorflow/core/api_def/base_api/api_def_BarrierInsertMany.pbtxt
new file mode 100644
index 0000000000..645e1eee08
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_BarrierInsertMany.pbtxt
@@ -0,0 +1,35 @@
+op {
+ graph_op_name: "BarrierInsertMany"
+ in_arg {
+ name: "handle"
+ description: <<END
+The handle to a barrier.
+END
+ }
+ in_arg {
+ name: "keys"
+ description: <<END
+A one-dimensional tensor of keys, with length n.
+END
+ }
+ in_arg {
+ name: "values"
+ description: <<END
+An any-dimensional tensor of values, which are associated with the
+respective keys. The 0th dimension must have length n.
+END
+ }
+ attr {
+ name: "component_index"
+ description: <<END
+The component of the barrier elements that is being assigned.
+END
+ }
+ summary: "For each key, assigns the respective value to the specified component."
+ description: <<END
+If a key is not found in the barrier, this operation will create a new
+incomplete element. If a key is found in the barrier, and the element
+already has a value at component_index, this operation will fail with
+INVALID_ARGUMENT, and leave the barrier in an undefined state.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_BarrierReadySize.pbtxt b/tensorflow/core/api_def/base_api/api_def_BarrierReadySize.pbtxt
new file mode 100644
index 0000000000..38e92d3483
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_BarrierReadySize.pbtxt
@@ -0,0 +1,17 @@
+op {
+ graph_op_name: "BarrierReadySize"
+ in_arg {
+ name: "handle"
+ description: <<END
+The handle to a barrier.
+END
+ }
+ out_arg {
+ name: "size"
+ description: <<END
+The number of complete elements (i.e. those with all of their value
+components set) in the barrier.
+END
+ }
+ summary: "Computes the number of complete elements in the given barrier."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_BarrierTakeMany.pbtxt b/tensorflow/core/api_def/base_api/api_def_BarrierTakeMany.pbtxt
new file mode 100644
index 0000000000..584ce7536b
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_BarrierTakeMany.pbtxt
@@ -0,0 +1,68 @@
+op {
+ graph_op_name: "BarrierTakeMany"
+ in_arg {
+ name: "handle"
+ description: <<END
+The handle to a barrier.
+END
+ }
+ in_arg {
+ name: "num_elements"
+ description: <<END
+A single-element tensor containing the number of elements to
+take.
+END
+ }
+ out_arg {
+ name: "indices"
+ description: <<END
+A one-dimensional tensor of indices, with length num_elements.
+These indices refer to the batch in which the values were placed into the
+barrier (starting with MIN_LONG and increasing with each BarrierInsertMany).
+END
+ }
+ out_arg {
+ name: "keys"
+ description: <<END
+A one-dimensional tensor of keys, with length num_elements.
+END
+ }
+ out_arg {
+ name: "values"
+ description: <<END
+One any-dimensional tensor per component in a barrier element. All
+values have length num_elements in the 0th dimension.
+END
+ }
+ attr {
+ name: "component_types"
+ description: <<END
+The type of each component in a value.
+END
+ }
+ attr {
+ name: "allow_small_batch"
+ description: <<END
+Allows returning fewer than num_elements items if the barrier is
+already closed.
+END
+ }
+ attr {
+ name: "timeout_ms"
+ description: <<END
+If the queue is empty, this operation will block for up to
+timeout_ms milliseconds.
+Note: This option is not supported yet.
+END
+ }
+ summary: "Takes the given number of completed elements from a barrier."
+ description: <<END
+This operation concatenates completed-element component tensors along
+the 0th dimension to make a single component tensor.
+
+Elements come out of the barrier when they are complete, and in the order
+in which they were placed into the barrier. The indices output provides
+information about the batch in which each element was originally inserted
+into the barrier.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_BatchCholesky.pbtxt b/tensorflow/core/api_def/base_api/api_def_BatchCholesky.pbtxt
new file mode 100644
index 0000000000..758ed3c6d3
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_BatchCholesky.pbtxt
@@ -0,0 +1,3 @@
+op {
+ graph_op_name: "BatchCholesky"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_BatchCholeskyGrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_BatchCholeskyGrad.pbtxt
new file mode 100644
index 0000000000..9099433f0b
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_BatchCholeskyGrad.pbtxt
@@ -0,0 +1,3 @@
+op {
+ graph_op_name: "BatchCholeskyGrad"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_BatchDataset.pbtxt b/tensorflow/core/api_def/base_api/api_def_BatchDataset.pbtxt
new file mode 100644
index 0000000000..639d962874
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_BatchDataset.pbtxt
@@ -0,0 +1,11 @@
+op {
+ graph_op_name: "BatchDataset"
+ in_arg {
+ name: "batch_size"
+ description: <<END
+A scalar representing the number of elements to accumulate in a
+batch.
+END
+ }
+ summary: "Creates a dataset that batches `batch_size` elements from `input_dataset`."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_BatchFFT.pbtxt b/tensorflow/core/api_def/base_api/api_def_BatchFFT.pbtxt
new file mode 100644
index 0000000000..5ef542cc8b
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_BatchFFT.pbtxt
@@ -0,0 +1,3 @@
+op {
+ graph_op_name: "BatchFFT"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_BatchFFT2D.pbtxt b/tensorflow/core/api_def/base_api/api_def_BatchFFT2D.pbtxt
new file mode 100644
index 0000000000..1ce0612aaf
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_BatchFFT2D.pbtxt
@@ -0,0 +1,3 @@
+op {
+ graph_op_name: "BatchFFT2D"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_BatchFFT3D.pbtxt b/tensorflow/core/api_def/base_api/api_def_BatchFFT3D.pbtxt
new file mode 100644
index 0000000000..5834e0337f
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_BatchFFT3D.pbtxt
@@ -0,0 +1,3 @@
+op {
+ graph_op_name: "BatchFFT3D"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_BatchIFFT.pbtxt b/tensorflow/core/api_def/base_api/api_def_BatchIFFT.pbtxt
new file mode 100644
index 0000000000..931365f0a4
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_BatchIFFT.pbtxt
@@ -0,0 +1,3 @@
+op {
+ graph_op_name: "BatchIFFT"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_BatchIFFT2D.pbtxt b/tensorflow/core/api_def/base_api/api_def_BatchIFFT2D.pbtxt
new file mode 100644
index 0000000000..af0bf62461
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_BatchIFFT2D.pbtxt
@@ -0,0 +1,3 @@
+op {
+ graph_op_name: "BatchIFFT2D"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_BatchIFFT3D.pbtxt b/tensorflow/core/api_def/base_api/api_def_BatchIFFT3D.pbtxt
new file mode 100644
index 0000000000..f051e1f5e0
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_BatchIFFT3D.pbtxt
@@ -0,0 +1,3 @@
+op {
+ graph_op_name: "BatchIFFT3D"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_BatchMatMul.pbtxt b/tensorflow/core/api_def/base_api/api_def_BatchMatMul.pbtxt
new file mode 100644
index 0000000000..7999598aff
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_BatchMatMul.pbtxt
@@ -0,0 +1,54 @@
+op {
+ graph_op_name: "BatchMatMul"
+ in_arg {
+ name: "x"
+ description: <<END
+2-D or higher with shape `[..., r_x, c_x]`.
+END
+ }
+ in_arg {
+ name: "y"
+ description: <<END
+2-D or higher with shape `[..., r_y, c_y]`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+2-D or higher with shape `[..., r_o, c_o]`.
+END
+ }
+ attr {
+ name: "adj_x"
+ description: <<END
+If `True`, adjoint the slices of `x`. Defaults to `False`.
+END
+ }
+ attr {
+ name: "adj_y"
+ description: <<END
+If `True`, adjoint the slices of `y`. Defaults to `False`.
+END
+ }
+ summary: "Multiplies slices of two tensors in batches."
+ description: <<END
+Multiplies all slices of `Tensor` `x` and `y` (each slice can be
+viewed as an element of a batch), and arranges the individual results
+in a single output tensor of the same batch size. Each of the
+individual slices can optionally be adjointed (to adjoint a matrix
+means to transpose and conjugate it) before multiplication by setting
+the `adj_x` or `adj_y` flag to `True`; both default to `False`.
+
+The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]`
+and `[..., r_y, c_y]`.
+
+The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where:
+
+ r_o = c_x if adj_x else r_x
+ c_o = r_y if adj_y else c_y
+
+It is computed as:
+
+ output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])
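+
+For example, a minimal sketch (assuming the Python `tf.matmul` wrapper,
+which dispatches to this op for inputs of rank greater than 2):
+
+```python
+import tensorflow as tf
+
+x = tf.ones([2, 3, 4])  # a batch of two 3x4 matrices
+y = tf.ones([2, 4, 5])  # a batch of two 4x5 matrices
+
+z = tf.matmul(x, y)  # shape [2, 3, 5]: r_o = r_x, c_o = c_y
+# With adjoint_b=True the 5x4 slices are adjointed to 4x5 first.
+w = tf.matmul(x, tf.ones([2, 5, 4]), adjoint_b=True)  # shape [2, 3, 5]
+```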
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_BatchMatrixBandPart.pbtxt b/tensorflow/core/api_def/base_api/api_def_BatchMatrixBandPart.pbtxt
new file mode 100644
index 0000000000..592a95a14e
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_BatchMatrixBandPart.pbtxt
@@ -0,0 +1,3 @@
+op {
+ graph_op_name: "BatchMatrixBandPart"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_BatchMatrixDeterminant.pbtxt b/tensorflow/core/api_def/base_api/api_def_BatchMatrixDeterminant.pbtxt
new file mode 100644
index 0000000000..9f1c5a897c
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_BatchMatrixDeterminant.pbtxt
@@ -0,0 +1,3 @@
+op {
+ graph_op_name: "BatchMatrixDeterminant"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_BatchMatrixDiag.pbtxt b/tensorflow/core/api_def/base_api/api_def_BatchMatrixDiag.pbtxt
new file mode 100644
index 0000000000..f7ed5cca2b
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_BatchMatrixDiag.pbtxt
@@ -0,0 +1,3 @@
+op {
+ graph_op_name: "BatchMatrixDiag"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_BatchMatrixDiagPart.pbtxt b/tensorflow/core/api_def/base_api/api_def_BatchMatrixDiagPart.pbtxt
new file mode 100644
index 0000000000..e96bb9c57f
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_BatchMatrixDiagPart.pbtxt
@@ -0,0 +1,3 @@
+op {
+ graph_op_name: "BatchMatrixDiagPart"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_BatchMatrixInverse.pbtxt b/tensorflow/core/api_def/base_api/api_def_BatchMatrixInverse.pbtxt
new file mode 100644
index 0000000000..41d4305f5f
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_BatchMatrixInverse.pbtxt
@@ -0,0 +1,3 @@
+op {
+ graph_op_name: "BatchMatrixInverse"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_BatchMatrixSetDiag.pbtxt b/tensorflow/core/api_def/base_api/api_def_BatchMatrixSetDiag.pbtxt
new file mode 100644
index 0000000000..b11edf2ba1
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_BatchMatrixSetDiag.pbtxt
@@ -0,0 +1,3 @@
+op {
+ graph_op_name: "BatchMatrixSetDiag"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_BatchMatrixSolve.pbtxt b/tensorflow/core/api_def/base_api/api_def_BatchMatrixSolve.pbtxt
new file mode 100644
index 0000000000..6012ea4a22
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_BatchMatrixSolve.pbtxt
@@ -0,0 +1,3 @@
+op {
+ graph_op_name: "BatchMatrixSolve"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_BatchMatrixSolveLs.pbtxt b/tensorflow/core/api_def/base_api/api_def_BatchMatrixSolveLs.pbtxt
new file mode 100644
index 0000000000..0fd6e055c4
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_BatchMatrixSolveLs.pbtxt
@@ -0,0 +1,3 @@
+op {
+ graph_op_name: "BatchMatrixSolveLs"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_BatchMatrixTriangularSolve.pbtxt b/tensorflow/core/api_def/base_api/api_def_BatchMatrixTriangularSolve.pbtxt
new file mode 100644
index 0000000000..22fcb4a02f
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_BatchMatrixTriangularSolve.pbtxt
@@ -0,0 +1,3 @@
+op {
+ graph_op_name: "BatchMatrixTriangularSolve"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_BatchNormWithGlobalNormalization.pbtxt b/tensorflow/core/api_def/base_api/api_def_BatchNormWithGlobalNormalization.pbtxt
new file mode 100644
index 0000000000..2943f5f009
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_BatchNormWithGlobalNormalization.pbtxt
@@ -0,0 +1,57 @@
+op {
+ graph_op_name: "BatchNormWithGlobalNormalization"
+ in_arg {
+ name: "t"
+ description: <<END
+A 4D input Tensor.
+END
+ }
+ in_arg {
+ name: "m"
+ description: <<END
+A 1D mean Tensor with size matching the last dimension of t.
+This is the first output from tf.nn.moments,
+or a saved moving average thereof.
+END
+ }
+ in_arg {
+ name: "v"
+ description: <<END
+A 1D variance Tensor with size matching the last dimension of t.
+This is the second output from tf.nn.moments,
+or a saved moving average thereof.
+END
+ }
+ in_arg {
+ name: "beta"
+ description: <<END
+A 1D beta Tensor with size matching the last dimension of t.
+An offset to be added to the normalized tensor.
+END
+ }
+ in_arg {
+ name: "gamma"
+ description: <<END
+A 1D gamma Tensor with size matching the last dimension of t.
+If "scale_after_normalization" is true, this tensor will be multiplied
+with the normalized tensor.
+END
+ }
+ attr {
+ name: "variance_epsilon"
+ description: <<END
+A small float number to avoid dividing by 0.
+END
+ }
+ attr {
+ name: "scale_after_normalization"
+ description: <<END
+A bool indicating whether the resulting tensor
+needs to be multiplied by gamma.
+END
+ }
+ summary: "Batch normalization."
+ description: <<END
+This op is deprecated. Prefer `tf.nn.batch_normalization`.
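+
+A short sketch of the suggested replacement (the shapes below are made up
+for illustration):
+
+```python
+import tensorflow as tf
+
+t = tf.random_normal([8, 4, 4, 16])      # 4D input
+m, v = tf.nn.moments(t, axes=[0, 1, 2])  # per-channel mean and variance
+beta, gamma = tf.zeros([16]), tf.ones([16])
+out = tf.nn.batch_normalization(t, m, v, beta, gamma,
+                                variance_epsilon=0.001)
+```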
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_BatchNormWithGlobalNormalizationGrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_BatchNormWithGlobalNormalizationGrad.pbtxt
new file mode 100644
index 0000000000..a702e303f3
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_BatchNormWithGlobalNormalizationGrad.pbtxt
@@ -0,0 +1,86 @@
+op {
+ graph_op_name: "BatchNormWithGlobalNormalizationGrad"
+ in_arg {
+ name: "t"
+ description: <<END
+A 4D input Tensor.
+END
+ }
+ in_arg {
+ name: "m"
+ description: <<END
+A 1D mean Tensor with size matching the last dimension of t.
+This is the first output from tf.nn.moments,
+or a saved moving average thereof.
+END
+ }
+ in_arg {
+ name: "v"
+ description: <<END
+A 1D variance Tensor with size matching the last dimension of t.
+This is the second output from tf.nn.moments,
+or a saved moving average thereof.
+END
+ }
+ in_arg {
+ name: "gamma"
+ description: <<END
+A 1D gamma Tensor with size matching the last dimension of t.
+If "scale_after_normalization" is true, this Tensor will be multiplied
+with the normalized Tensor.
+END
+ }
+ in_arg {
+ name: "backprop"
+ description: <<END
+4D backprop Tensor.
+END
+ }
+ out_arg {
+ name: "dx"
+ description: <<END
+4D backprop tensor for input.
+END
+ }
+ out_arg {
+ name: "dm"
+ description: <<END
+1D backprop tensor for mean.
+END
+ }
+ out_arg {
+ name: "dv"
+ description: <<END
+1D backprop tensor for variance.
+END
+ }
+ out_arg {
+ name: "db"
+ description: <<END
+1D backprop tensor for beta.
+END
+ }
+ out_arg {
+ name: "dg"
+ description: <<END
+1D backprop tensor for gamma.
+END
+ }
+ attr {
+ name: "variance_epsilon"
+ description: <<END
+A small float number to avoid dividing by 0.
+END
+ }
+ attr {
+ name: "scale_after_normalization"
+ description: <<END
+A bool indicating whether the resulting tensor
+needs to be multiplied by gamma.
+END
+ }
+ summary: "Gradients for batch normalization."
+ description: <<END
+This op is deprecated. See `tf.nn.batch_normalization`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_BatchSelfAdjointEig.pbtxt b/tensorflow/core/api_def/base_api/api_def_BatchSelfAdjointEig.pbtxt
new file mode 100644
index 0000000000..8fd3ee3b6b
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_BatchSelfAdjointEig.pbtxt
@@ -0,0 +1,3 @@
+op {
+ graph_op_name: "BatchSelfAdjointEig"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_BatchSelfAdjointEigV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_BatchSelfAdjointEigV2.pbtxt
new file mode 100644
index 0000000000..9b025ab048
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_BatchSelfAdjointEigV2.pbtxt
@@ -0,0 +1,3 @@
+op {
+ graph_op_name: "BatchSelfAdjointEigV2"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_BatchSvd.pbtxt b/tensorflow/core/api_def/base_api/api_def_BatchSvd.pbtxt
new file mode 100644
index 0000000000..8e5a51b58f
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_BatchSvd.pbtxt
@@ -0,0 +1,3 @@
+op {
+ graph_op_name: "BatchSvd"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_BatchToSpace.pbtxt b/tensorflow/core/api_def/base_api/api_def_BatchToSpace.pbtxt
new file mode 100644
index 0000000000..ee9a5a01a4
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_BatchToSpace.pbtxt
@@ -0,0 +1,104 @@
+op {
+ graph_op_name: "BatchToSpace"
+ in_arg {
+ name: "input"
+ description: <<END
+4-D tensor with shape
+`[batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
+ depth]`. Note that the batch size of the input tensor must be divisible by
+`block_size * block_size`.
+END
+ }
+ in_arg {
+ name: "crops"
+ description: <<END
+2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
+how many elements to crop from the intermediate result across the spatial
+dimensions as follows:
+
+ crops = [[crop_top, crop_bottom], [crop_left, crop_right]]
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+4-D with shape `[batch, height, width, depth]`, where:
+
+ height = height_pad - crop_top - crop_bottom
+ width = width_pad - crop_left - crop_right
+
+The attr `block_size` must be greater than one. It indicates the block size.
+
+Some examples:
+
+(1) For the following input of shape `[4, 1, 1, 1]` and block_size of 2:
+
+```
+[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
+```
+
+The output tensor has shape `[1, 2, 2, 1]` and value:
+
+```
+x = [[[[1], [2]], [[3], [4]]]]
+```
+
+(2) For the following input of shape `[4, 1, 1, 3]` and block_size of 2:
+
+```
+[[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
+```
+
+The output tensor has shape `[1, 2, 2, 3]` and value:
+
+```
+x = [[[[1, 2, 3], [4, 5, 6]],
+ [[7, 8, 9], [10, 11, 12]]]]
+```
+
+(3) For the following input of shape `[4, 2, 2, 1]` and block_size of 2:
+
+```
+x = [[[[1], [3]], [[9], [11]]],
+ [[[2], [4]], [[10], [12]]],
+ [[[5], [7]], [[13], [15]]],
+ [[[6], [8]], [[14], [16]]]]
+```
+
+The output tensor has shape `[1, 4, 4, 1]` and value:
+
+```
+x = [[[1], [2], [3], [4]],
+ [[5], [6], [7], [8]],
+ [[9], [10], [11], [12]],
+ [[13], [14], [15], [16]]]
+```
+
+(4) For the following input of shape `[8, 1, 2, 1]` and block_size of 2:
+
+```
+x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
+ [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
+```
+
+The output tensor has shape `[2, 2, 4, 1]` and value:
+
+```
+x = [[[[1], [2], [3], [4]],
+      [[5], [6], [7], [8]]],
+     [[[9], [10], [11], [12]],
+      [[13], [14], [15], [16]]]]
+```
+END
+ }
+ summary: "BatchToSpace for 4-D tensors of type T."
+ description: <<END
+This is a legacy version of the more general BatchToSpaceND.
+
+Rearranges (permutes) data from batch into blocks of spatial data, followed by
+cropping. This is the reverse transformation of SpaceToBatch. More specifically,
+this op outputs a copy of the input tensor where values from the `batch`
+dimension are moved in spatial blocks to the `height` and `width` dimensions,
+followed by cropping along the `height` and `width` dimensions.
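+
+As a runnable sketch of example (1) from the `output` documentation above
+(assuming the Python `tf.batch_to_space` wrapper for this op):
+
+```python
+import tensorflow as tf
+
+x = tf.constant([[[[1]]], [[[2]]], [[[3]]], [[[4]]]])  # shape [4, 1, 1, 1]
+y = tf.batch_to_space(x, crops=[[0, 0], [0, 0]], block_size=2)
+# y has shape [1, 2, 2, 1] and value [[[[1], [2]], [[3], [4]]]].
+```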
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_BatchToSpaceND.pbtxt b/tensorflow/core/api_def/base_api/api_def_BatchToSpaceND.pbtxt
new file mode 100644
index 0000000000..8e25f9995e
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_BatchToSpaceND.pbtxt
@@ -0,0 +1,139 @@
+op {
+ graph_op_name: "BatchToSpaceND"
+ in_arg {
+ name: "input"
+ description: <<END
+N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
+where spatial_shape has M dimensions.
+END
+ }
+ in_arg {
+ name: "block_shape"
+ description: <<END
+1-D with shape `[M]`, all values must be >= 1.
+END
+ }
+ in_arg {
+ name: "crops"
+ description: <<END
+2-D with shape `[M, 2]`, all values must be >= 0.
+ `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input
+ dimension `i + 1`, which corresponds to spatial dimension `i`. It is
+ required that
+ `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.
+
+This operation is equivalent to the following steps:
+
+1. Reshape `input` to `reshaped` of shape:
+ [block_shape[0], ..., block_shape[M-1],
+ batch / prod(block_shape),
+ input_shape[1], ..., input_shape[N-1]]
+
+2. Permute dimensions of `reshaped` to produce `permuted` of shape
+ [batch / prod(block_shape),
+
+ input_shape[1], block_shape[0],
+ ...,
+ input_shape[M], block_shape[M-1],
+
+ input_shape[M+1], ..., input_shape[N-1]]
+
+3. Reshape `permuted` to produce `reshaped_permuted` of shape
+ [batch / prod(block_shape),
+
+ input_shape[1] * block_shape[0],
+ ...,
+ input_shape[M] * block_shape[M-1],
+
+ input_shape[M+1],
+ ...,
+ input_shape[N-1]]
+
+4. Crop the start and end of dimensions `[1, ..., M]` of
+ `reshaped_permuted` according to `crops` to produce the output of shape:
+ [batch / prod(block_shape),
+
+ input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1],
+ ...,
+ input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1],
+
+ input_shape[M+1], ..., input_shape[N-1]]
+
+Some examples:
+
+(1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and
+ `crops = [[0, 0], [0, 0]]`:
+
+```
+[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
+```
+
+The output tensor has shape `[1, 2, 2, 1]` and value:
+
+```
+x = [[[[1], [2]], [[3], [4]]]]
+```
+
+(2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and
+ `crops = [[0, 0], [0, 0]]`:
+
+```
+[[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
+```
+
+The output tensor has shape `[1, 2, 2, 3]` and value:
+
+```
+x = [[[[1, 2, 3], [4, 5, 6]],
+ [[7, 8, 9], [10, 11, 12]]]]
+```
+
+(3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and
+ `crops = [[0, 0], [0, 0]]`:
+
+```
+x = [[[[1], [3]], [[9], [11]]],
+ [[[2], [4]], [[10], [12]]],
+ [[[5], [7]], [[13], [15]]],
+ [[[6], [8]], [[14], [16]]]]
+```
+
+The output tensor has shape `[1, 4, 4, 1]` and value:
+
+```
+x = [[[1], [2], [3], [4]],
+ [[5], [6], [7], [8]],
+ [[9], [10], [11], [12]],
+ [[13], [14], [15], [16]]]
+```
+
+(4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and
+ `crops = [[0, 0], [2, 0]]`:
+
+```
+x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
+ [[[0], [2], [4]]], [[[0], [10], [12]]],
+ [[[0], [5], [7]]], [[[0], [13], [15]]],
+ [[[0], [6], [8]]], [[[0], [14], [16]]]]
+```
+
+The output tensor has shape `[2, 2, 4, 1]` and value:
+
+```
+x = [[[[1], [2], [3], [4]],
+ [[5], [6], [7], [8]]],
+ [[[9], [10], [11], [12]],
+ [[13], [14], [15], [16]]]]
+```
+END
+ }
+ summary: "BatchToSpace for N-D tensors of type T."
+ description: <<END
+This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of shape
+`block_shape + [batch]`, interleaves these blocks back into the grid defined by
+the spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as
+the input. The spatial dimensions of this intermediate result are then
+optionally cropped according to `crops` to produce the output. This is the
+reverse of SpaceToBatch. See below for a precise description.
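+
+For instance, a small sketch reproducing example (1) above (assuming the
+Python `tf.batch_to_space_nd` wrapper for this op):
+
+```python
+import tensorflow as tf
+
+x = tf.constant([[[[1]]], [[[2]]], [[[3]]], [[[4]]]])  # shape [4, 1, 1, 1]
+y = tf.batch_to_space_nd(x, block_shape=[2, 2], crops=[[0, 0], [0, 0]])
+# y has shape [1, 2, 2, 1] and value [[[[1], [2]], [[3], [4]]]].
+```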
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Betainc.pbtxt b/tensorflow/core/api_def/base_api/api_def_Betainc.pbtxt
new file mode 100644
index 0000000000..5d7df75122
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Betainc.pbtxt
@@ -0,0 +1,19 @@
+op {
+ graph_op_name: "Betainc"
+ summary: "Compute the regularized incomplete beta integral \\\\(I_x(a, b)\\\\)."
+ description: <<END
+The regularized incomplete beta integral is defined as:
+
+
+\\(I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}\\)
+
+where
+
+
+\\(B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt\\)
+
+
+is the incomplete beta function and \\(B(a, b)\\) is the *complete*
+beta function.
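+
+As a quick sanity check (assuming the Python `tf.betainc(a, b, x)`
+wrapper): for \\(a = b = 1\\) the integrand is 1, so \\(I_x(1, 1) = x\\).
+
+```python
+import tensorflow as tf
+
+a = tf.fill([3], 1.0)
+b = tf.fill([3], 1.0)
+x = tf.constant([0.25, 0.5, 0.75])
+v = tf.betainc(a, b, x)
+
+with tf.Session() as sess:
+  print(sess.run(v))  # approximately [0.25, 0.5, 0.75]
+```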
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_BiasAdd.pbtxt b/tensorflow/core/api_def/base_api/api_def_BiasAdd.pbtxt
new file mode 100644
index 0000000000..58266e74a2
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_BiasAdd.pbtxt
@@ -0,0 +1,38 @@
+op {
+ graph_op_name: "BiasAdd"
+ in_arg {
+ name: "value"
+ description: <<END
+Any number of dimensions.
+END
+ }
+ in_arg {
+ name: "bias"
+ description: <<END
+1-D with size the last dimension of `value`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Broadcasted sum of `value` and `bias`.
+END
+ }
+ attr {
+ name: "data_format"
+ description: <<END
+Specify the data format of the input and output data. With the
+default format "NHWC", the bias tensor will be added to the last dimension
+of the value tensor.
+Alternatively, the format could be "NCHW", the data storage order of:
+ [batch, in_channels, in_height, in_width].
+The tensor will be added to "in_channels", the third-to-the-last
+ dimension.
+END
+ }
+ summary: "Adds `bias` to `value`."
+ description: <<END
+This is a special case of `tf.add` where `bias` is restricted to be 1-D.
+Broadcasting is supported, so `value` may have any number of dimensions.
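+
+For example, roughly (assuming the Python `tf.nn.bias_add` wrapper):
+
+```python
+import tensorflow as tf
+
+value = tf.zeros([2, 3, 4])           # any number of dimensions
+bias = tf.constant([1., 2., 3., 4.])  # 1-D, sized to the last dimension
+out = tf.nn.bias_add(value, bias)     # broadcast sum, shape [2, 3, 4]
+```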
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_BiasAddGrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_BiasAddGrad.pbtxt
new file mode 100644
index 0000000000..5f2adf1a35
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_BiasAddGrad.pbtxt
@@ -0,0 +1,33 @@
+op {
+ graph_op_name: "BiasAddGrad"
+ in_arg {
+ name: "out_backprop"
+ description: <<END
+Any number of dimensions.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+1-D with size the feature dimension of `out_backprop`.
+END
+ }
+ attr {
+ name: "data_format"
+ description: <<END
+Specify the data format of the input and output data. With the
+default format "NHWC", the bias tensor will be added to the last dimension
+of the value tensor.
+Alternatively, the format could be "NCHW", the data storage order of:
+ [batch, in_channels, in_height, in_width].
+The tensor will be added to "in_channels", the third-to-the-last
+ dimension.
+END
+ }
+ summary: "The backward operation for \"BiasAdd\" on the \"bias\" tensor."
+ description: <<END
+It accumulates all the values from out_backprop into the feature dimension.
+For NHWC data format, the feature dimension is the last. For NCHW data format,
+the feature dimension is the third-to-last.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_BiasAddV1.pbtxt b/tensorflow/core/api_def/base_api/api_def_BiasAddV1.pbtxt
new file mode 100644
index 0000000000..9799682bf2
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_BiasAddV1.pbtxt
@@ -0,0 +1,29 @@
+op {
+ graph_op_name: "BiasAddV1"
+ visibility: SKIP
+ in_arg {
+ name: "value"
+ description: <<END
+Any number of dimensions.
+END
+ }
+ in_arg {
+ name: "bias"
+ description: <<END
+1-D with size the last dimension of `value`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Broadcasted sum of `value` and `bias`.
+END
+ }
+ summary: "Adds `bias` to `value`."
+ description: <<END
+This is a deprecated version of BiasAdd and will soon be removed.
+
+This is a special case of `tf.add` where `bias` is restricted to be 1-D.
+Broadcasting is supported, so `value` may have any number of dimensions.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Bincount.pbtxt b/tensorflow/core/api_def/base_api/api_def_Bincount.pbtxt
new file mode 100644
index 0000000000..1016f2ff67
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Bincount.pbtxt
@@ -0,0 +1,40 @@
+op {
+ graph_op_name: "Bincount"
+ in_arg {
+ name: "arr"
+ description: <<END
+int32 `Tensor`.
+END
+ }
+ in_arg {
+ name: "size"
+ description: <<END
+non-negative int32 scalar `Tensor`.
+END
+ }
+ in_arg {
+ name: "weights"
+ description: <<END
+An int32, int64, float32, or float64 `Tensor` with the same
+shape as `arr`, or a length-0 `Tensor`, in which case it acts as if all
+weights were equal to 1.
+END
+ }
+ out_arg {
+ name: "bins"
+ description: <<END
+1D `Tensor` with length equal to `size`. The counts or summed weights for
+each value in the range [0, size).
+END
+ }
+ summary: "Counts the number of occurrences of each value in an integer array."
+ description: <<END
+Outputs a vector with length `size` and the same dtype as `weights`. If
+`weights` are empty, then index `i` stores the number of times the value `i` is
+counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of
+the value in `weights` at each index where the corresponding value in `arr` is
+`i`.
+
+Values in `arr` outside of the range [0, size) are ignored.
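+
+A small sketch (assuming the Python `tf.bincount` wrapper, which derives
+`size` from the data and the optional `minlength`/`maxlength` arguments):
+
+```python
+import tensorflow as tf
+
+arr = tf.constant([1, 1, 2, 5])
+counts = tf.bincount(arr)  # [0, 2, 1, 0, 0, 1]
+# Value 1 occurs where the weights are 1. and 2., so index 1 sums to 3.
+sums = tf.bincount(arr, weights=tf.constant([1., 2., 3., 4.]))
+# -> [0., 3., 3., 0., 0., 4.]
+```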
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Bitcast.pbtxt b/tensorflow/core/api_def/base_api/api_def_Bitcast.pbtxt
new file mode 100644
index 0000000000..e4d4f9ea08
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Bitcast.pbtxt
@@ -0,0 +1,18 @@
+op {
+ graph_op_name: "Bitcast"
+ summary: "Bitcasts a tensor from one type to another without copying data."
+ description: <<END
+Given a tensor `input`, this operation returns a tensor that has the same buffer
+data as `input` with datatype `type`.
+
+If the input datatype `T` is larger than the output datatype `type` then the
+shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)].
+
+If `T` is smaller than `type`, the operator requires that the rightmost
+dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from
+[..., sizeof(`type`)/sizeof(`T`)] to [...].
+
+*NOTE*: Bitcast is implemented as a low-level cast, so machines with different
+endian orderings will give different results.
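+
+For example (assuming the Python `tf.bitcast` wrapper):
+
+```python
+import tensorflow as tf
+
+x = tf.constant([1, 2], dtype=tf.int32)  # shape [2]
+y = tf.bitcast(x, tf.uint8)   # shape [2, 4]: sizeof(int32)/sizeof(uint8)
+z = tf.bitcast(y, tf.int32)   # back to shape [2]
+```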
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_BitwiseAnd.pbtxt b/tensorflow/core/api_def/base_api/api_def_BitwiseAnd.pbtxt
new file mode 100644
index 0000000000..44d34ce9ec
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_BitwiseAnd.pbtxt
@@ -0,0 +1,8 @@
+op {
+ graph_op_name: "BitwiseAnd"
+ summary: "Elementwise computes the bitwise AND of `x` and `y`."
+ description: <<END
+The result will have those bits set that are set in both `x` and `y`. The
+computation is performed on the underlying representations of `x` and `y`.
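+
+For example (assuming the Python `tf.bitwise` module; its `bitwise_or`
+and `bitwise_xor` siblings behave analogously):
+
+```python
+import tensorflow as tf
+
+x = tf.constant([0b1100], dtype=tf.int32)
+y = tf.constant([0b1010], dtype=tf.int32)
+both = tf.bitwise.bitwise_and(x, y)  # [0b1000], i.e. [8]
+```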
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_BitwiseOr.pbtxt b/tensorflow/core/api_def/base_api/api_def_BitwiseOr.pbtxt
new file mode 100644
index 0000000000..e9c8feb40d
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_BitwiseOr.pbtxt
@@ -0,0 +1,8 @@
+op {
+ graph_op_name: "BitwiseOr"
+ summary: "Elementwise computes the bitwise OR of `x` and `y`."
+ description: <<END
+The result will have those bits set that are set in `x`, `y`, or both. The
+computation is performed on the underlying representations of `x` and `y`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_BitwiseXor.pbtxt b/tensorflow/core/api_def/base_api/api_def_BitwiseXor.pbtxt
new file mode 100644
index 0000000000..22be3d134a
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_BitwiseXor.pbtxt
@@ -0,0 +1,8 @@
+op {
+ graph_op_name: "BitwiseXor"
+ summary: "Elementwise computes the bitwise XOR of `x` and `y`."
+ description: <<END
+The result will have those bits set that differ between `x` and `y`. The
+computation is performed on the underlying representations of `x` and `y`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_BroadcastArgs.pbtxt b/tensorflow/core/api_def/base_api/api_def_BroadcastArgs.pbtxt
new file mode 100644
index 0000000000..9c8564e218
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_BroadcastArgs.pbtxt
@@ -0,0 +1,11 @@
+op {
+ graph_op_name: "BroadcastArgs"
+ endpoint {
+ name: "BroadcastDynamicShape"
+ }
+ summary: "Return the shape of s0 op s1 with broadcast."
+ description: <<END
+Given `s0` and `s1`, tensors that represent shapes, compute `r0`, the
+broadcasted shape. `s0`, `s1` and `r0` are all integer vectors.
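+
+For example (assuming the Python `tf.broadcast_dynamic_shape` wrapper,
+matching the renamed endpoint above):
+
+```python
+import tensorflow as tf
+
+r0 = tf.broadcast_dynamic_shape(tf.constant([2, 1]), tf.constant([1, 3]))
+with tf.Session() as sess:
+  print(sess.run(r0))  # [2 3]
+```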
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_BroadcastGradientArgs.pbtxt b/tensorflow/core/api_def/base_api/api_def_BroadcastGradientArgs.pbtxt
new file mode 100644
index 0000000000..a6e4516a26
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_BroadcastGradientArgs.pbtxt
@@ -0,0 +1,8 @@
+op {
+ graph_op_name: "BroadcastGradientArgs"
+ visibility: HIDDEN
+ summary: "Return the reduction indices for computing gradients of s0 op s1 with broadcast."
+ description: <<END
+This is typically used by gradient computations for a broadcasting operation.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Bucketize.pbtxt b/tensorflow/core/api_def/base_api/api_def_Bucketize.pbtxt
new file mode 100644
index 0000000000..b464af9530
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Bucketize.pbtxt
@@ -0,0 +1,38 @@
+op {
+ graph_op_name: "Bucketize"
+ in_arg {
+ name: "input"
+ description: <<END
+A `Tensor` of any shape containing int or float values.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Same shape as 'input', with each value of 'input' replaced by its bucket index.
+
+@compatibility(numpy)
+Equivalent to np.digitize.
+@end_compatibility
+END
+ }
+ attr {
+ name: "boundaries"
+ description: <<END
+A sorted list of floats giving the boundaries of the buckets.
+END
+ }
+ summary: "Bucketizes \'input\' based on \'boundaries\'."
+ description: <<END
+For example, if the inputs are
+ boundaries = [0, 10, 100]
+ input = [[-5, 10000]
+ [150, 10]
+ [5, 100]]
+
+then the output will be
+ output = [[0, 3]
+ [3, 2]
+ [1, 3]]
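+
+Per the NumPy compatibility note above, the same result can be sketched
+with `np.digitize`:
+
+```python
+import numpy as np
+
+boundaries = [0, 10, 100]
+inputs = np.array([[-5, 10000], [150, 10], [5, 100]])
+print(np.digitize(inputs, boundaries))
+# [[0 3]
+#  [3 2]
+#  [1 3]]
+```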
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_C.pbtxt b/tensorflow/core/api_def/base_api/api_def_C.pbtxt
deleted file mode 100644
index 48b04b7971..0000000000
--- a/tensorflow/core/api_def/base_api/api_def_C.pbtxt
+++ /dev/null
@@ -1,513 +0,0 @@
-op {
- graph_op_name: "CTCBeamSearchDecoder"
- endpoint {
- name: "CTCBeamSearchDecoder"
- }
- summary: "Performs beam search decoding on the logits given in input."
- description: <<END
-A note about the attribute merge_repeated: For the beam search decoder,
-this means that if consecutive entries in a beam are the same, only
-the first of these is emitted. That is, when the top path is "A B B B B",
-"A B" is returned if merge_repeated = True but "A B B B B" is
-returned if merge_repeated = False.
-END
-}
-op {
- graph_op_name: "CTCGreedyDecoder"
- endpoint {
- name: "CTCGreedyDecoder"
- }
- summary: "Performs greedy decoding on the logits given in inputs."
- description: <<END
-A note about the attribute merge_repeated: if enabled, when
-consecutive logits' maximum indices are the same, only the first of
-these is emitted. Labeling the blank '*', the sequence "A B B * B B"
-becomes "A B B" if merge_repeated = True and "A B B B B" if
-merge_repeated = False.
-
-Regardless of the value of merge_repeated, if the maximum index of a given
-time and batch corresponds to the blank, index `(num_classes - 1)`, no new
-element is emitted.
-END
-}
-op {
- graph_op_name: "CTCLoss"
- endpoint {
- name: "CTCLoss"
- }
- summary: "Calculates the CTC Loss (log probability) for each batch entry. Also calculates"
- description: <<END
-the gradient. This class performs the softmax operation for you, so inputs
-should be e.g. linear projections of outputs by an LSTM.
-END
-}
-op {
- graph_op_name: "CacheDataset"
- endpoint {
- name: "CacheDataset"
- }
- summary: "Creates a dataset that caches elements from `input_dataset`."
- description: <<END
-A CacheDataset will iterate over the input_dataset, and store tensors. If the
-cache already exists, the cache will be used. If the cache is inappropriate
-(e.g. cannot be opened, contains tensors of the wrong shape / size), an error
-will be returned when used.
-END
-}
-op {
- graph_op_name: "Cast"
- endpoint {
- name: "Cast"
- }
- summary: "Cast x of type SrcT to y of DstT."
-}
-op {
- graph_op_name: "Ceil"
- endpoint {
- name: "Ceil"
- }
- summary: "Returns element-wise smallest integer in not less than x."
-}
-op {
- graph_op_name: "CheckNumerics"
- endpoint {
- name: "CheckNumerics"
- }
- summary: "Checks a tensor for NaN and Inf values."
- description: <<END
-When run, reports an `InvalidArgument` error if `tensor` has any values
-that are not a number (NaN) or infinity (Inf). Otherwise, passes `tensor` as-is.
-END
-}
-op {
- graph_op_name: "Cholesky"
- endpoint {
- name: "Cholesky"
- }
- summary: "Computes the Cholesky decomposition of one or more square matrices."
- description: <<END
-The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
-form square matrices.
-
-The input has to be symmetric and positive definite. Only the lower-triangular
-part of the input will be used for this operation. The upper-triangular part
-will not be read.
-
-The output is a tensor of the same shape as the input
-containing the Cholesky decompositions for all input submatrices `[..., :, :]`.
-
-**Note**: The gradient computation on GPU is faster for large matrices but
-not for large batch dimensions when the submatrices are small. In this
-case it might be faster to use the CPU.
-END
-}
-op {
- graph_op_name: "CholeskyGrad"
- endpoint {
- name: "CholeskyGrad"
- }
- summary: "Computes the reverse mode backpropagated gradient of the Cholesky algorithm."
- description: <<END
-For an explanation see "Differentiation of the Cholesky algorithm" by
-Iain Murray http://arxiv.org/abs/1602.07527.
-END
-}
-op {
- graph_op_name: "CompareAndBitpack"
- endpoint {
- name: "CompareAndBitpack"
- }
- summary: "Compare values of `input` to `threshold` and pack resulting bits into a `uint8`."
- description: <<END
-Each comparison returns a boolean `true` (if `input_value > threshold`)
-or `false` otherwise.
-
-This operation is useful for Locality-Sensitive-Hashing (LSH) and other
-algorithms that use hashing approximations of cosine and `L2` distances;
-codes can be generated from an input via:
-
-```python
-codebook_size = 50
-codebook_bits = codebook_size * 32
-codebook = tf.get_variable('codebook', [x.shape[-1].value, codebook_bits],
- dtype=x.dtype,
- initializer=tf.orthogonal_initializer())
-codes = compare_and_threshold(tf.matmul(x, codebook), threshold=0.)
-codes = tf.bitcast(codes, tf.int32) # go from uint8 to int32
-# now codes has shape x.shape[:-1] + [codebook_size]
-```
-
-**NOTE**: Currently, the innermost dimension of the tensor must be divisible
-by 8.
-
-Given an `input` shaped `[s0, s1, ..., s_n]`, the output is
-a `uint8` tensor shaped `[s0, s1, ..., s_n / 8]`.
-END
-}
-op {
- graph_op_name: "Complex"
- endpoint {
- name: "Complex"
- }
- summary: "Converts two real numbers to a complex number."
- description: <<END
-Given a tensor `real` representing the real part of a complex number, and a
-tensor `imag` representing the imaginary part of a complex number, this
-operation returns complex numbers elementwise of the form \\(a + bj\\), where
-*a* represents the `real` part and *b* represents the `imag` part.
-
-The input tensors `real` and `imag` must have the same shape.
-
-For example:
-
-```
-# tensor 'real' is [2.25, 3.25]
-# tensor `imag` is [4.75, 5.75]
-tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]]
-```
-END
-}
-op {
- graph_op_name: "ComplexAbs"
- endpoint {
- name: "ComplexAbs"
- }
- summary: "Computes the complex absolute value of a tensor."
- description: <<END
-Given a tensor `x` of complex numbers, this operation returns a tensor of type
-`float` or `double` that is the absolute value of each element in `x`. All
-elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute
-value is computed as \\( \sqrt{a^2 + b^2}\\).
-END
-}
-op {
- graph_op_name: "ComputeAccidentalHits"
- endpoint {
- name: "ComputeAccidentalHits"
- }
- summary: "Computes the ids of the positions in sampled_candidates that match true_labels."
- description: <<END
-When doing log-odds NCE, the result of this op should be passed through a
-SparseToDense op, then added to the logits of the sampled candidates. This has
-the effect of 'removing' the sampled labels that match the true labels by
-making the classifier sure that they are sampled labels.
-END
-}
-op {
- graph_op_name: "Concat"
- endpoint {
- name: "Concat"
- }
- summary: "Concatenates tensors along one dimension."
-}
-op {
- graph_op_name: "ConcatOffset"
- endpoint {
- name: "ConcatOffset"
- }
- summary: "Computes offsets of concat inputs within its output."
- description: <<END
-For example:
-
-```
-# 'x' is [2, 2, 7]
-# 'y' is [2, 3, 7]
-# 'z' is [2, 5, 7]
-concat_offset(2, [x, y, z]) => [0, 0, 0], [0, 2, 0], [0, 5, 0]
-```
-
-This is typically used by gradient computations for a concat operation.
-END
-}
-op {
- graph_op_name: "ConcatV2"
- endpoint {
- name: "ConcatV2"
- }
- summary: "Concatenates tensors along one dimension."
-}
-op {
- graph_op_name: "ConcatenateDataset"
- endpoint {
- name: "ConcatenateDataset"
- }
- summary: "Creates a dataset that concatenates `input_dataset` with `another_dataset`."
-}
-op {
- graph_op_name: "ConditionalAccumulator"
- endpoint {
- name: "ConditionalAccumulator"
- }
- summary: "A conditional accumulator for aggregating gradients."
- description: <<END
-The accumulator accepts gradients marked with local_step greater or
-equal to the most recent global_step known to the accumulator. The
-average can be extracted from the accumulator, provided sufficient
-gradients have been accumulated. Extracting the average automatically
-resets the aggregate to 0, and increments the global_step recorded by
-the accumulator.
-END
-}
-op {
- graph_op_name: "Conj"
- endpoint {
- name: "Conj"
- }
- summary: "Returns the complex conjugate of a complex number."
- description: <<END
-Given a tensor `input` of complex numbers, this operation returns a tensor of
-complex numbers that are the complex conjugate of each element in `input`. The
-complex numbers in `input` must be of the form \\(a + bj\\), where *a* is the
-real part and *b* is the imaginary part.
-
-The complex conjugate returned by this operation is of the form \\(a - bj\\).
-
-For example:
-
-```
-# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
-tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]
-```
-END
-}
-op {
- graph_op_name: "Const"
- endpoint {
- name: "Const"
- }
- summary: "Returns a constant tensor."
-}
-op {
- graph_op_name: "ControlTrigger"
- endpoint {
- name: "ControlTrigger"
- }
- summary: "Does nothing. Serves as a control trigger for scheduling."
- description: <<END
-Only useful as a placeholder for control edges.
-END
-}
-op {
- graph_op_name: "Conv2D"
- endpoint {
- name: "Conv2D"
- }
- summary: "Computes a 2-D convolution given 4-D `input` and `filter` tensors."
- description: <<END
-Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
-and a filter / kernel tensor of shape
-`[filter_height, filter_width, in_channels, out_channels]`, this op
-performs the following:
-
-1. Flattens the filter to a 2-D matrix with shape
- `[filter_height * filter_width * in_channels, output_channels]`.
-2. Extracts image patches from the input tensor to form a *virtual*
- tensor of shape `[batch, out_height, out_width,
- filter_height * filter_width * in_channels]`.
-3. For each patch, right-multiplies the filter matrix and the image patch
- vector.
-
-In detail, with the default NHWC format,
-
- output[b, i, j, k] =
- sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *
- filter[di, dj, q, k]
-
-Must have `strides[0] = strides[3] = 1`. For the most common case of the same
-horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
-END
-}
-op {
- graph_op_name: "Conv2DBackpropFilter"
- endpoint {
- name: "Conv2DBackpropFilter"
- }
- summary: "Computes the gradients of convolution with respect to the filter."
-}
-op {
- graph_op_name: "Conv2DBackpropInput"
- endpoint {
- name: "Conv2DBackpropInput"
- }
- summary: "Computes the gradients of convolution with respect to the input."
-}
-op {
- graph_op_name: "Conv3D"
- endpoint {
- name: "Conv3D"
- }
- summary: "Computes a 3-D convolution given 5-D `input` and `filter` tensors."
- description: <<END
-In signal processing, cross-correlation is a measure of similarity of
-two waveforms as a function of a time-lag applied to one of them. This
-is also known as a sliding dot product or sliding inner-product.
-
-Our Conv3D implements a form of cross-correlation.
-END
-}
-op {
- graph_op_name: "Conv3DBackpropFilter"
- endpoint {
- name: "Conv3DBackpropFilter"
- }
- summary: "Computes the gradients of 3-D convolution with respect to the filter."
-}
-op {
- graph_op_name: "Conv3DBackpropFilterV2"
- endpoint {
- name: "Conv3DBackpropFilterV2"
- }
- summary: "Computes the gradients of 3-D convolution with respect to the filter."
-}
-op {
- graph_op_name: "Conv3DBackpropInput"
- endpoint {
- name: "Conv3DBackpropInput"
- }
- summary: "Computes the gradients of 3-D convolution with respect to the input."
-}
-op {
- graph_op_name: "Conv3DBackpropInputV2"
- endpoint {
- name: "Conv3DBackpropInputV2"
- }
- summary: "Computes the gradients of 3-D convolution with respect to the input."
-}
-op {
- graph_op_name: "Cos"
- endpoint {
- name: "Cos"
- }
- summary: "Computes cos of x element-wise."
-}
-op {
- graph_op_name: "Cosh"
- endpoint {
- name: "Cosh"
- }
- summary: "Computes hyperbolic cosine of x element-wise."
-}
-op {
- graph_op_name: "CountUpTo"
- endpoint {
- name: "CountUpTo"
- }
- summary: "Increments \'ref\' until it reaches \'limit\'."
-}
-op {
- graph_op_name: "CropAndResize"
- endpoint {
- name: "CropAndResize"
- }
- summary: "Extracts crops from the input image tensor and bilinearly resizes them (possibly"
- description: <<END
-with aspect ratio change) to a common output size specified by `crop_size`. This
-is more general than the `crop_to_bounding_box` op which extracts a fixed size
-slice from the input image and does not allow resizing or aspect ratio change.
-
-Returns a tensor with `crops` from the input `image` at positions defined at the
-bounding box locations in `boxes`. The cropped boxes are all resized (with
-bilinear interpolation) to a fixed `size = [crop_height, crop_width]`. The
-result is a 4-D tensor `[num_boxes, crop_height, crop_width, depth]`.
-END
-}
-op {
- graph_op_name: "CropAndResizeGradBoxes"
- endpoint {
- name: "CropAndResizeGradBoxes"
- }
- summary: "Computes the gradient of the crop_and_resize op wrt the input boxes tensor."
-}
-op {
- graph_op_name: "CropAndResizeGradImage"
- endpoint {
- name: "CropAndResizeGradImage"
- }
- summary: "Computes the gradient of the crop_and_resize op wrt the input image tensor."
-}
-op {
- graph_op_name: "Cross"
- endpoint {
- name: "Cross"
- }
- summary: "Compute the pairwise cross product."
- description: <<END
-`a` and `b` must be the same shape; they can either be simple 3-element vectors,
-or any shape where the innermost dimension is 3. In the latter case, each pair
-of corresponding 3-element vectors is cross-multiplied independently.
-END
-}
-op {
- graph_op_name: "Cumprod"
- endpoint {
- name: "Cumprod"
- }
- summary: "Compute the cumulative product of the tensor `x` along `axis`."
- description: <<END
-By default, this op performs an inclusive cumprod, which means that the first
-element of the input is identical to the first element of the output:
-
-```python
-tf.cumprod([a, b, c]) # => [a, a * b, a * b * c]
-```
-
-By setting the `exclusive` kwarg to `True`, an exclusive cumprod is
-performed instead:
-
-```python
-tf.cumprod([a, b, c], exclusive=True) # => [1, a, a * b]
-```
-
-By setting the `reverse` kwarg to `True`, the cumprod is performed in the
-opposite direction:
-
-```python
-tf.cumprod([a, b, c], reverse=True) # => [a * b * c, b * c, c]
-```
-
-This is more efficient than using separate `tf.reverse` ops.
-
-The `reverse` and `exclusive` kwargs can also be combined:
-
-```python
-tf.cumprod([a, b, c], exclusive=True, reverse=True) # => [b * c, c, 1]
-```
-END
-}
-op {
- graph_op_name: "Cumsum"
- endpoint {
- name: "Cumsum"
- }
- summary: "Compute the cumulative sum of the tensor `x` along `axis`."
- description: <<END
-By default, this op performs an inclusive cumsum, which means that the first
-element of the input is identical to the first element of the output:
-
-```python
-tf.cumsum([a, b, c]) # => [a, a + b, a + b + c]
-```
-
-By setting the `exclusive` kwarg to `True`, an exclusive cumsum is
-performed instead:
-
-```python
-tf.cumsum([a, b, c], exclusive=True) # => [0, a, a + b]
-```
-
-By setting the `reverse` kwarg to `True`, the cumsum is performed in the
-opposite direction:
-
-```python
-tf.cumsum([a, b, c], reverse=True) # => [a + b + c, b + c, c]
-```
-
-This is more efficient than using separate `tf.reverse` ops.
-
-The `reverse` and `exclusive` kwargs can also be combined:
-
-```python
-tf.cumsum([a, b, c], exclusive=True, reverse=True) # => [b + c, c, 0]
-```
-END
-}
diff --git a/tensorflow/core/api_def/base_api/api_def_CTCBeamSearchDecoder.pbtxt b/tensorflow/core/api_def/base_api/api_def_CTCBeamSearchDecoder.pbtxt
new file mode 100644
index 0000000000..36eb2fb7b4
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_CTCBeamSearchDecoder.pbtxt
@@ -0,0 +1,72 @@
+op {
+ graph_op_name: "CTCBeamSearchDecoder"
+ in_arg {
+ name: "inputs"
+ description: <<END
+3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
+END
+ }
+ in_arg {
+ name: "sequence_length"
+ description: <<END
+A vector containing sequence lengths, size `(batch)`.
+END
+ }
+ out_arg {
+ name: "decoded_indices"
+ description: <<END
+A list (length: top_paths) of indices matrices. Matrix j,
+size `(total_decoded_outputs[j] x 2)`, has indices of a
+`SparseTensor<int64, 2>`. The rows store: [batch, time].
+END
+ }
+ out_arg {
+ name: "decoded_values"
+ description: <<END
+A list (length: top_paths) of values vectors. Vector j,
+size `(length total_decoded_outputs[j])`, has the values of a
+`SparseTensor<int64, 2>`. The vector stores the decoded classes for beam j.
+END
+ }
+ out_arg {
+ name: "decoded_shape"
+ description: <<END
+A list (length: top_paths) of shape vector. Vector j,
+size `(2)`, stores the shape of the decoded `SparseTensor[j]`.
+Its values are: `[batch_size, max_decoded_length[j]]`.
+END
+ }
+ out_arg {
+ name: "log_probability"
+ description: <<END
+A matrix, shaped: `(batch_size x top_paths)`. The
+sequence log-probabilities.
+END
+ }
+ attr {
+ name: "beam_width"
+ description: <<END
+A scalar >= 0 (beam search beam width).
+END
+ }
+ attr {
+ name: "top_paths"
+ description: <<END
+A scalar >= 0, <= beam_width (controls output size).
+END
+ }
+ attr {
+ name: "merge_repeated"
+ description: <<END
+If true, merge repeated classes in output.
+END
+ }
+ summary: "Performs beam search decoding on the logits given in input."
+ description: <<END
+A note about the attribute merge_repeated: For the beam search decoder,
+this means that if consecutive entries in a beam are the same, only
+the first of these is emitted. That is, when the top path is "A B B B B",
+"A B" is returned if merge_repeated = True but "A B B B B" is
+returned if merge_repeated = False.
+END
+}
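A minimal usage sketch through the Python layer (assuming the `tf.nn.ctc_beam_search_decoder` wrapper, which packages the three `decoded_*` outputs into `SparseTensor`s):

```python
import tensorflow as tf

max_time, batch_size, num_classes = 50, 2, 11
logits = tf.random_normal([max_time, batch_size, num_classes])
seq_len = tf.fill([batch_size], max_time)

# decoded: list (length top_paths) of SparseTensor<int64, 2>;
# log_prob: dense matrix shaped [batch_size, top_paths].
decoded, log_prob = tf.nn.ctc_beam_search_decoder(
    logits, seq_len, beam_width=10, top_paths=3, merge_repeated=True)
```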
diff --git a/tensorflow/core/api_def/base_api/api_def_CTCGreedyDecoder.pbtxt b/tensorflow/core/api_def/base_api/api_def_CTCGreedyDecoder.pbtxt
new file mode 100644
index 0000000000..814f5350a2
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_CTCGreedyDecoder.pbtxt
@@ -0,0 +1,61 @@
+op {
+ graph_op_name: "CTCGreedyDecoder"
+ in_arg {
+ name: "inputs"
+ description: <<END
+3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
+END
+ }
+ in_arg {
+ name: "sequence_length"
+ description: <<END
+A vector containing sequence lengths, size `(batch_size)`.
+END
+ }
+ out_arg {
+ name: "decoded_indices"
+ description: <<END
+Indices matrix, size `(total_decoded_outputs x 2)`,
+of a `SparseTensor<int64, 2>`. The rows store: [batch, time].
+END
+ }
+ out_arg {
+ name: "decoded_values"
+ description: <<END
+Values vector, size: `(total_decoded_outputs)`,
+of a `SparseTensor<int64, 2>`. The vector stores the decoded classes.
+END
+ }
+ out_arg {
+ name: "decoded_shape"
+ description: <<END
+Shape vector, size `(2)`, of the decoded SparseTensor.
+Values are: `[batch_size, max_decoded_length]`.
+END
+ }
+ out_arg {
+ name: "log_probability"
+ description: <<END
+Matrix, size `(batch_size x 1)`, containing sequence
+log-probabilities.
+END
+ }
+ attr {
+ name: "merge_repeated"
+ description: <<END
+If True, merge repeated classes in output.
+END
+ }
+ summary: "Performs greedy decoding on the logits given in inputs."
+ description: <<END
+A note about the attribute merge_repeated: if enabled, when
+consecutive logits' maximum indices are the same, only the first of
+these is emitted. Labeling the blank '*', the sequence "A B B * B B"
+becomes "A B B" if merge_repeated = True and "A B B B B" if
+merge_repeated = False.
+
+Regardless of the value of merge_repeated, if the maximum index of a given
+time and batch corresponds to the blank, index `(num_classes - 1)`, no new
+element is emitted.
+END
+}
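A matching sketch for the greedy decoder (assuming the `tf.nn.ctc_greedy_decoder` wrapper; its second return value corresponds to the `log_probability` output):

```python
import tensorflow as tf

max_time, batch_size, num_classes = 50, 2, 11
logits = tf.random_normal([max_time, batch_size, num_classes])
seq_len = tf.fill([batch_size], max_time)

# decoded: list with a single SparseTensor<int64, 2>;
# neg_sum_logits: matrix shaped [batch_size, 1].
decoded, neg_sum_logits = tf.nn.ctc_greedy_decoder(
    logits, seq_len, merge_repeated=True)
```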
diff --git a/tensorflow/core/api_def/base_api/api_def_CTCLoss.pbtxt b/tensorflow/core/api_def/base_api/api_def_CTCLoss.pbtxt
new file mode 100644
index 0000000000..a85597ae6e
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_CTCLoss.pbtxt
@@ -0,0 +1,70 @@
+op {
+ graph_op_name: "CTCLoss"
+ in_arg {
+ name: "inputs"
+ description: <<END
+3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
+END
+ }
+ in_arg {
+ name: "labels_indices"
+ description: <<END
+The indices of a `SparseTensor<int32, 2>`.
+`labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for
+`(batch b, time t)`.
+END
+ }
+ in_arg {
+ name: "labels_values"
+ description: <<END
+The values (labels) associated with the given batch and time.
+END
+ }
+ in_arg {
+ name: "sequence_length"
+ description: <<END
+A vector containing sequence lengths (batch).
+END
+ }
+ out_arg {
+ name: "loss"
+ description: <<END
+A vector (batch) containing log-probabilities.
+END
+ }
+ out_arg {
+ name: "gradient"
+ description: <<END
+The gradient of `loss`. 3-D, shape:
+`(max_time x batch_size x num_classes)`.
+END
+ }
+ attr {
+ name: "preprocess_collapse_repeated"
+ description: <<END
+Scalar, if true then repeated labels are
+collapsed prior to the CTC calculation.
+END
+ }
+ attr {
+ name: "ctc_merge_repeated"
+ description: <<END
+Scalar. If set to false, *during* CTC calculation
+repeated non-blank labels will not be merged and are interpreted as
+individual labels. This is a simplified version of CTC.
+END
+ }
+ attr {
+ name: "ignore_longer_outputs_than_inputs"
+ description: <<END
+Scalar. If set to true, during CTC
+calculation, items that have longer output sequences than input sequences
+are skipped: they don't contribute to the loss term and have zero-gradient.
+END
+ }
+ summary: "Calculates the CTC Loss (log probability) for each batch entry. Also calculates"
+ description: <<END
+the gradient. This class performs the softmax operation for you, so inputs
+should be e.g. linear projections of outputs by an LSTM.
+END
+}
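A minimal sketch via the `tf.nn.ctc_loss` wrapper (assumed; at the Python level the labels are passed as one `SparseTensor` instead of the separate indices/values inputs):

```python
import tensorflow as tf

max_time, batch_size, num_classes = 50, 2, 11
logits = tf.random_normal([max_time, batch_size, num_classes])
seq_len = tf.fill([batch_size], max_time)

# Labels as a SparseTensor<int32, 2>; rows are indexed by [batch, time].
labels = tf.SparseTensor(indices=[[0, 0], [0, 1], [1, 0]],
                         values=[1, 2, 3],
                         dense_shape=[batch_size, 2])

loss = tf.nn.ctc_loss(labels, logits, seq_len)  # shape [batch_size]
```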
diff --git a/tensorflow/core/api_def/base_api/api_def_CacheDataset.pbtxt b/tensorflow/core/api_def/base_api/api_def_CacheDataset.pbtxt
new file mode 100644
index 0000000000..6889b8ea14
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_CacheDataset.pbtxt
@@ -0,0 +1,17 @@
+op {
+ graph_op_name: "CacheDataset"
+ in_arg {
+ name: "filename"
+ description: <<END
+A path on the filesystem where we should cache the dataset. Note: this
+will be a directory.
+END
+ }
+ summary: "Creates a dataset that caches elements from `input_dataset`."
+ description: <<END
+A CacheDataset will iterate over the input_dataset and store tensors. If the
+cache already exists, the cache will be used. If the cache is inappropriate
+(e.g. cannot be opened, contains tensors of the wrong shape / size), an error
+will be returned when used.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Cast.pbtxt b/tensorflow/core/api_def/base_api/api_def_Cast.pbtxt
new file mode 100644
index 0000000000..8a0ba505cb
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Cast.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Cast"
+ summary: "Cast x of type SrcT to y of DstT."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Ceil.pbtxt b/tensorflow/core/api_def/base_api/api_def_Ceil.pbtxt
new file mode 100644
index 0000000000..ad1ada8d71
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Ceil.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Ceil"
+ summary: "Returns element-wise smallest integer in not less than x."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_CheckNumerics.pbtxt b/tensorflow/core/api_def/base_api/api_def_CheckNumerics.pbtxt
new file mode 100644
index 0000000000..cadf3667e2
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_CheckNumerics.pbtxt
@@ -0,0 +1,14 @@
+op {
+ graph_op_name: "CheckNumerics"
+ attr {
+ name: "message"
+ description: <<END
+Prefix of the error message.
+END
+ }
+ summary: "Checks a tensor for NaN and Inf values."
+ description: <<END
+When run, reports an `InvalidArgument` error if `tensor` has any values
+that are not a number (NaN) or infinity (Inf). Otherwise, passes `tensor` as-is.
+END
+}
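For example, via the `tf.check_numerics` wrapper (a sketch; the `message` attr becomes the prefix of the raised error):

```python
import tensorflow as tf

x = tf.constant([1.0, float('nan'), 3.0])
checked = tf.check_numerics(x, message="x contains non-finite values")

with tf.Session() as sess:
    sess.run(checked)  # raises InvalidArgumentError prefixed with the message
```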
diff --git a/tensorflow/core/api_def/base_api/api_def_Cholesky.pbtxt b/tensorflow/core/api_def/base_api/api_def_Cholesky.pbtxt
new file mode 100644
index 0000000000..713abee630
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Cholesky.pbtxt
@@ -0,0 +1,31 @@
+op {
+ graph_op_name: "Cholesky"
+ in_arg {
+ name: "input"
+ description: <<END
+Shape is `[..., M, M]`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Shape is `[..., M, M]`.
+END
+ }
+ summary: "Computes the Cholesky decomposition of one or more square matrices."
+ description: <<END
+The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
+form square matrices.
+
+The input has to be symmetric and positive definite. Only the lower-triangular
+part of the input will be used for this operation. The upper-triangular part
+will not be read.
+
+The output is a tensor of the same shape as the input
+containing the Cholesky decompositions for all input submatrices `[..., :, :]`.
+
+**Note**: The gradient computation on GPU is faster for large matrices but
+not for large batch dimensions when the submatrices are small. In this
+case it might be faster to use the CPU.
+END
+}
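A small sketch using the `tf.cholesky` wrapper on a single symmetric positive-definite matrix:

```python
import tensorflow as tf

a = tf.constant([[4.0, 2.0],
                 [2.0, 3.0]])   # symmetric positive definite
l = tf.cholesky(a)              # lower-triangular, same shape as `a`
recon = tf.matmul(l, l, transpose_b=True)  # reconstructs `a`

with tf.Session() as sess:
    print(sess.run(l))
```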
diff --git a/tensorflow/core/api_def/base_api/api_def_CholeskyGrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_CholeskyGrad.pbtxt
new file mode 100644
index 0000000000..faf5e274b3
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_CholeskyGrad.pbtxt
@@ -0,0 +1,30 @@
+op {
+ graph_op_name: "CholeskyGrad"
+ in_arg {
+ name: "l"
+ description: <<END
+Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`.
+Algorithm depends only on lower triangular part of the innermost matrices of
+this tensor.
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+df/dl where f is some scalar function. Shape is `[..., M, M]`.
+Algorithm depends only on lower triangular part of the innermost matrices of
+this tensor.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Symmetrized version of df/dA . Shape is `[..., M, M]`
+END
+ }
+ summary: "Computes the reverse mode backpropagated gradient of the Cholesky algorithm."
+ description: <<END
+For an explanation see "Differentiation of the Cholesky algorithm" by
+Iain Murray http://arxiv.org/abs/1602.07527.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_CompareAndBitpack.pbtxt b/tensorflow/core/api_def/base_api/api_def_CompareAndBitpack.pbtxt
new file mode 100644
index 0000000000..57ba4f8f4c
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_CompareAndBitpack.pbtxt
@@ -0,0 +1,53 @@
+op {
+ graph_op_name: "CompareAndBitpack"
+ in_arg {
+ name: "input"
+ description: <<END
+Values to compare against `threshold` and bitpack.
+END
+ }
+ in_arg {
+ name: "threshold"
+ description: <<END
+Threshold to compare against.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+The bitpacked comparisons.
+END
+ }
+ attr {
+ name: "T"
+ description: <<END
+The type of the input and threshold.
+END
+ }
+ summary: "Compare values of `input` to `threshold` and pack resulting bits into a `uint8`."
+ description: <<END
+Each comparison returns a boolean `true` (if `input_value > threshold`)
+or `false` otherwise.
+
+This operation is useful for Locality-Sensitive-Hashing (LSH) and other
+algorithms that use hashing approximations of cosine and `L2` distances;
+codes can be generated from an input via:
+
+```python
+codebook_size = 50
+codebook_bits = codebook_size * 32
+codebook = tf.get_variable('codebook', [x.shape[-1].value, codebook_bits],
+ dtype=x.dtype,
+ initializer=tf.orthogonal_initializer())
+codes = compare_and_bitpack(tf.matmul(x, codebook), threshold=0.)
+codes = tf.bitcast(codes, tf.int32) # go from uint8 to int32
+# now codes has shape x.shape[:-1] + [codebook_size]
+```
+
+**NOTE**: Currently, the innermost dimension of the tensor must be divisible
+by 8.
+
+Given an `input` shaped `[s0, s1, ..., s_n]`, the output is
+a `uint8` tensor shaped `[s0, s1, ..., s_n / 8]`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Complex.pbtxt b/tensorflow/core/api_def/base_api/api_def_Complex.pbtxt
new file mode 100644
index 0000000000..e421d8ce0b
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Complex.pbtxt
@@ -0,0 +1,20 @@
+op {
+ graph_op_name: "Complex"
+ summary: "Converts two real numbers to a complex number."
+ description: <<END
+Given a tensor `real` representing the real part of a complex number, and a
+tensor `imag` representing the imaginary part of a complex number, this
+operation returns complex numbers elementwise of the form \\(a + bj\\), where
+*a* represents the `real` part and *b* represents the `imag` part.
+
+The input tensors `real` and `imag` must have the same shape.
+
+For example:
+
+```
+# tensor 'real' is [2.25, 3.25]
+# tensor `imag` is [4.75, 5.75]
+tf.complex(real, imag) ==> [2.25 + 4.75j, 3.25 + 5.75j]
+```
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ComplexAbs.pbtxt b/tensorflow/core/api_def/base_api/api_def_ComplexAbs.pbtxt
new file mode 100644
index 0000000000..19088f5dfc
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ComplexAbs.pbtxt
@@ -0,0 +1,10 @@
+op {
+ graph_op_name: "ComplexAbs"
+ summary: "Computes the complex absolute value of a tensor."
+ description: <<END
+Given a tensor `x` of complex numbers, this operation returns a tensor of type
+`float` or `double` that is the absolute value of each element in `x`. All
+elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute
+value is computed as \\( \sqrt{a^2 + b^2}\\).
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ComputeAccidentalHits.pbtxt b/tensorflow/core/api_def/base_api/api_def_ComputeAccidentalHits.pbtxt
new file mode 100644
index 0000000000..8cf1e80542
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ComputeAccidentalHits.pbtxt
@@ -0,0 +1,62 @@
+op {
+ graph_op_name: "ComputeAccidentalHits"
+ in_arg {
+ name: "true_classes"
+ description: <<END
+The true_classes output of UnpackSparseLabels.
+END
+ }
+ in_arg {
+ name: "sampled_candidates"
+ description: <<END
+The sampled_candidates output of CandidateSampler.
+END
+ }
+ out_arg {
+ name: "indices"
+ description: <<END
+A vector of indices corresponding to rows of true_candidates.
+END
+ }
+ out_arg {
+ name: "ids"
+ description: <<END
+A vector of IDs of positions in sampled_candidates that match a true_label
+for the row with the corresponding index in indices.
+END
+ }
+ out_arg {
+ name: "weights"
+ description: <<END
+A vector of the same length as indices and ids, in which each element
+is -FLOAT_MAX.
+END
+ }
+ attr {
+ name: "num_true"
+ description: <<END
+Number of true labels per context.
+END
+ }
+ attr {
+ name: "seed"
+ description: <<END
+If either seed or seed2 is set to be non-zero, the random number
+generator is seeded by the given seed. Otherwise, it is seeded by a
+random seed.
+END
+ }
+ attr {
+ name: "seed2"
+ description: <<END
+A second seed to avoid seed collision.
+END
+ }
+ summary: "Computes the ids of the positions in sampled_candidates that match true_labels."
+ description: <<END
+When doing log-odds NCE, the result of this op should be passed through a
+SparseToDense op, then added to the logits of the sampled candidates. This has
+the effect of 'removing' the sampled labels that match the true labels by
+making the classifier sure that they are sampled labels.
+END
+}
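A sketch of the correction described above (assuming the `tf.nn.compute_accidental_hits` wrapper; the random inputs and `sampled_logits` are placeholders standing in for a real candidate-sampling setup):

```python
import tensorflow as tf

batch_size, num_sampled, num_classes = 32, 64, 1000
true_classes = tf.random_uniform([batch_size, 1], maxval=num_classes,
                                 dtype=tf.int64)
sampled = tf.random_uniform([num_sampled], maxval=num_classes, dtype=tf.int64)
sampled_logits = tf.zeros([batch_size, num_sampled])

indices, ids, weights = tf.nn.compute_accidental_hits(
    true_classes, sampled, num_true=1)

# Scatter the -FLOAT_MAX weights into the sampled logits, 'removing'
# sampled candidates that collide with true labels.
hit_positions = tf.stack([indices, tf.cast(ids, tf.int32)], axis=1)
sampled_logits += tf.sparse_to_dense(
    hit_positions, tf.shape(sampled_logits), weights,
    default_value=0.0, validate_indices=False)
```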
diff --git a/tensorflow/core/api_def/base_api/api_def_Concat.pbtxt b/tensorflow/core/api_def/base_api/api_def_Concat.pbtxt
new file mode 100644
index 0000000000..1bad600e5b
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Concat.pbtxt
@@ -0,0 +1,27 @@
+op {
+ graph_op_name: "Concat"
+ visibility: SKIP
+ in_arg {
+ name: "concat_dim"
+ description: <<END
+0-D. The dimension along which to concatenate. Must be in the
+range [0, rank(values)).
+END
+ }
+ in_arg {
+ name: "values"
+ description: <<END
+The `N` Tensors to concatenate. Their ranks and types must match,
+and their sizes must match in all dimensions except `concat_dim`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+A `Tensor` with the concatenation of values stacked along the
+`concat_dim` dimension. This tensor's shape matches that of `values` except
+in `concat_dim` where it has the sum of the sizes.
+END
+ }
+ summary: "Concatenates tensors along one dimension."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ConcatOffset.pbtxt b/tensorflow/core/api_def/base_api/api_def_ConcatOffset.pbtxt
new file mode 100644
index 0000000000..84b11715ce
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ConcatOffset.pbtxt
@@ -0,0 +1,36 @@
+op {
+ graph_op_name: "ConcatOffset"
+ visibility: SKIP
+ in_arg {
+ name: "concat_dim"
+ description: <<END
+The dimension along which to concatenate.
+END
+ }
+ in_arg {
+ name: "shape"
+ description: <<END
+The `N` int32 vectors representing shape of tensors being concatenated.
+END
+ }
+ out_arg {
+ name: "offset"
+ description: <<END
+The `N` int32 vectors representing the starting offset
+of input tensors within the concatenated output.
+END
+ }
+ summary: "Computes offsets of concat inputs within its output."
+ description: <<END
+For example:
+
+```
+# 'x' is [2, 2, 7]
+# 'y' is [2, 3, 7]
+# 'z' is [2, 5, 7]
+concat_offset(2, [x, y, z]) => [0, 0, 0], [0, 2, 0], [0, 5, 0]
+```
+
+This is typically used by gradient computations for a concat operation.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ConcatV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_ConcatV2.pbtxt
new file mode 100644
index 0000000000..f1a7a81c73
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ConcatV2.pbtxt
@@ -0,0 +1,29 @@
+op {
+ graph_op_name: "ConcatV2"
+ endpoint {
+ name: "Concat"
+ }
+ in_arg {
+ name: "values"
+ description: <<END
+List of `N` Tensors to concatenate. Their ranks and types must match,
+and their sizes must match in all dimensions except `axis`.
+END
+ }
+ in_arg {
+ name: "axis"
+ description: <<END
+0-D. The dimension along which to concatenate. Must be in the
+range [-rank(values), rank(values)).
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+A `Tensor` with the concatenation of values stacked along the
+`axis` dimension. This tensor's shape matches that of `values` except
+in `axis` where it has the sum of the sizes.
+END
+ }
+ summary: "Concatenates tensors along one dimension."
+}
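For example, through the public `tf.concat` endpoint:

```python
import tensorflow as tf

t1 = tf.constant([[1, 2], [3, 4]])
t2 = tf.constant([[5, 6], [7, 8]])

tf.concat([t1, t2], axis=0)   # shape [4, 2]
tf.concat([t1, t2], axis=1)   # shape [2, 4]
tf.concat([t1, t2], axis=-1)  # negative axes count from the end
```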
diff --git a/tensorflow/core/api_def/base_api/api_def_ConcatenateDataset.pbtxt b/tensorflow/core/api_def/base_api/api_def_ConcatenateDataset.pbtxt
new file mode 100644
index 0000000000..67281f9547
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ConcatenateDataset.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "ConcatenateDataset"
+ summary: "Creates a dataset that concatenates `input_dataset` with `another_dataset`."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ConditionalAccumulator.pbtxt b/tensorflow/core/api_def/base_api/api_def_ConditionalAccumulator.pbtxt
new file mode 100644
index 0000000000..64672e0e58
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ConditionalAccumulator.pbtxt
@@ -0,0 +1,44 @@
+op {
+ graph_op_name: "ConditionalAccumulator"
+ out_arg {
+ name: "handle"
+ description: <<END
+The handle to the accumulator.
+END
+ }
+ attr {
+ name: "dtype"
+ description: <<END
+The type of the value being accumulated.
+END
+ }
+ attr {
+ name: "shape"
+ description: <<END
+The shape of the values. Can be [], in which case the shape is unknown.
+END
+ }
+ attr {
+ name: "container"
+ description: <<END
+If non-empty, this accumulator is placed in the given container.
+Otherwise, a default container is used.
+END
+ }
+ attr {
+ name: "shared_name"
+ description: <<END
+If non-empty, this accumulator will be shared under the
+given name across multiple sessions.
+END
+ }
+ summary: "A conditional accumulator for aggregating gradients."
+ description: <<END
+The accumulator accepts gradients marked with local_step greater or
+equal to the most recent global_step known to the accumulator. The
+average can be extracted from the accumulator, provided sufficient
+gradients have been accumulated. Extracting the average automatically
+resets the aggregate to 0, and increments the global_step recorded by
+the accumulator.
+END
+}
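A minimal sketch using the `tf.ConditionalAccumulator` wrapper (assumed): two gradients are applied at the current global step, then their average is taken, which also resets the aggregate:

```python
import tensorflow as tf

acc = tf.ConditionalAccumulator(dtype=tf.float32, shape=[2])
apply_ops = [acc.apply_grad([1.0, 2.0], local_step=0),
             acc.apply_grad([3.0, 4.0], local_step=0)]
mean_grad = acc.take_grad(num_required=2)  # blocks until 2 grads accumulated

with tf.Session() as sess:
    sess.run(apply_ops)
    print(sess.run(mean_grad))  # => [2.0, 3.0]
```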
diff --git a/tensorflow/core/api_def/base_api/api_def_Conj.pbtxt b/tensorflow/core/api_def/base_api/api_def_Conj.pbtxt
new file mode 100644
index 0000000000..e161dc5b15
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Conj.pbtxt
@@ -0,0 +1,19 @@
+op {
+ graph_op_name: "Conj"
+ summary: "Returns the complex conjugate of a complex number."
+ description: <<END
+Given a tensor `input` of complex numbers, this operation returns a tensor of
+complex numbers that are the complex conjugate of each element in `input`. The
+complex numbers in `input` must be of the form \\(a + bj\\), where *a* is the
+real part and *b* is the imaginary part.
+
+The complex conjugate returned by this operation is of the form \\(a - bj\\).
+
+For example:
+
+```
+# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
+tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]
+```
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ConjugateTranspose.pbtxt b/tensorflow/core/api_def/base_api/api_def_ConjugateTranspose.pbtxt
new file mode 100644
index 0000000000..508c7a8bff
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ConjugateTranspose.pbtxt
@@ -0,0 +1,9 @@
+op {
+ graph_op_name: "ConjugateTranspose"
+ summary: "Shuffle dimensions of x according to a permutation and conjugate the result."
+ description: <<END
+The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
+ `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
+ `y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], perm[k],...,perm[s], perm[t], perm[u]])`
+END
+}
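Through the Python layer this is reachable as `tf.transpose(..., conjugate=True)` (assuming the TF 1.5-era API); it is equivalent to, but fused more efficiently than, `tf.conj(tf.transpose(...))`:

```python
import tensorflow as tf

x = tf.constant([[1 + 1j, 2 - 2j],
                 [3 + 3j, 4 - 4j]])
y = tf.transpose(x, perm=[1, 0], conjugate=True)
# y ==> [[1 - 1j, 3 - 3j],
#        [2 + 2j, 4 + 4j]]
```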
diff --git a/tensorflow/core/api_def/base_api/api_def_Const.pbtxt b/tensorflow/core/api_def/base_api/api_def_Const.pbtxt
new file mode 100644
index 0000000000..0d9e909f89
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Const.pbtxt
@@ -0,0 +1,10 @@
+op {
+ graph_op_name: "Const"
+ attr {
+ name: "value"
+ description: <<END
+Attr `value` is the tensor to return.
+END
+ }
+ summary: "Returns a constant tensor."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ControlTrigger.pbtxt b/tensorflow/core/api_def/base_api/api_def_ControlTrigger.pbtxt
new file mode 100644
index 0000000000..9902e3a784
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ControlTrigger.pbtxt
@@ -0,0 +1,7 @@
+op {
+ graph_op_name: "ControlTrigger"
+ summary: "Does nothing. Serves as a control trigger for scheduling."
+ description: <<END
+Only useful as a placeholder for control edges.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Conv2D.pbtxt b/tensorflow/core/api_def/base_api/api_def_Conv2D.pbtxt
new file mode 100644
index 0000000000..6522ce976f
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Conv2D.pbtxt
@@ -0,0 +1,72 @@
+op {
+ graph_op_name: "Conv2D"
+ in_arg {
+ name: "input"
+ description: <<END
+A 4-D tensor. The dimension order is interpreted according to the value
+of `data_format`, see below for details.
+END
+ }
+ in_arg {
+ name: "filter"
+ description: <<END
+A 4-D tensor of shape
+`[filter_height, filter_width, in_channels, out_channels]`
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+A 4-D tensor. The dimension order is determined by the value of
+`data_format`, see below for details.
+END
+ }
+ attr {
+ name: "strides"
+ description: <<END
+1-D tensor of length 4. The stride of the sliding window for each
+dimension of `input`. The dimension order is determined by the value of
+`data_format`, see below for details.
+END
+ }
+ attr {
+ name: "padding"
+ description: <<END
+The type of padding algorithm to use.
+END
+ }
+ attr {
+ name: "data_format"
+ description: <<END
+Specify the data format of the input and output data. With the
+default format "NHWC", the data is stored in the order of:
+ [batch, height, width, channels].
+Alternatively, the format could be "NCHW", the data storage order of:
+ [batch, channels, height, width].
+END
+ }
+ summary: "Computes a 2-D convolution given 4-D `input` and `filter` tensors."
+ description: <<END
+Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
+and a filter / kernel tensor of shape
+`[filter_height, filter_width, in_channels, out_channels]`, this op
+performs the following:
+
+1. Flattens the filter to a 2-D matrix with shape
+ `[filter_height * filter_width * in_channels, output_channels]`.
+2. Extracts image patches from the input tensor to form a *virtual*
+ tensor of shape `[batch, out_height, out_width,
+ filter_height * filter_width * in_channels]`.
+3. For each patch, right-multiplies the filter matrix and the image patch
+ vector.
+
+In detail, with the default NHWC format,
+
+ output[b, i, j, k] =
+ sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *
+ filter[di, dj, q, k]
+
+Must have `strides[0] = strides[3] = 1`. For the most common case of the same
+horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
+END
+}
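A sketch of the default NHWC case through `tf.nn.conv2d`:

```python
import tensorflow as tf

images = tf.random_normal([8, 28, 28, 3])   # [batch, height, width, channels]
filters = tf.random_normal([5, 5, 3, 16])   # [fh, fw, in_channels, out_channels]

y = tf.nn.conv2d(images, filters,
                 strides=[1, 1, 1, 1], padding='SAME')
# With 'SAME' padding and unit strides, y has shape [8, 28, 28, 16].
```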
diff --git a/tensorflow/core/api_def/base_api/api_def_Conv2DBackpropFilter.pbtxt b/tensorflow/core/api_def/base_api/api_def_Conv2DBackpropFilter.pbtxt
new file mode 100644
index 0000000000..4ea3374dbb
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Conv2DBackpropFilter.pbtxt
@@ -0,0 +1,57 @@
+op {
+ graph_op_name: "Conv2DBackpropFilter"
+ in_arg {
+ name: "input"
+ description: <<END
+4-D with shape `[batch, in_height, in_width, in_channels]`.
+END
+ }
+ in_arg {
+ name: "filter_sizes"
+ description: <<END
+An integer vector representing the tensor shape of `filter`,
+where `filter` is a 4-D
+`[filter_height, filter_width, in_channels, out_channels]` tensor.
+END
+ }
+ in_arg {
+ name: "out_backprop"
+ description: <<END
+4-D with shape `[batch, out_height, out_width, out_channels]`.
+Gradients w.r.t. the output of the convolution.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+4-D with shape
+`[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t.
+the `filter` input of the convolution.
+END
+ }
+ attr {
+ name: "strides"
+ description: <<END
+The stride of the sliding window for each dimension of the input
+of the convolution. Must be in the same order as the dimension specified with
+format.
+END
+ }
+ attr {
+ name: "padding"
+ description: <<END
+The type of padding algorithm to use.
+END
+ }
+ attr {
+ name: "data_format"
+ description: <<END
+Specify the data format of the input and output data. With the
+default format "NHWC", the data is stored in the order of:
+ [batch, in_height, in_width, in_channels].
+Alternatively, the format could be "NCHW", the data storage order of:
+ [batch, in_channels, in_height, in_width].
+END
+ }
+ summary: "Computes the gradients of convolution with respect to the filter."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Conv2DBackpropInput.pbtxt b/tensorflow/core/api_def/base_api/api_def_Conv2DBackpropInput.pbtxt
new file mode 100644
index 0000000000..4420073e38
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Conv2DBackpropInput.pbtxt
@@ -0,0 +1,56 @@
+op {
+ graph_op_name: "Conv2DBackpropInput"
+ in_arg {
+ name: "input_sizes"
+ description: <<END
+An integer vector representing the shape of `input`,
+where `input` is a 4-D `[batch, height, width, channels]` tensor.
+END
+ }
+ in_arg {
+ name: "filter"
+ description: <<END
+4-D with shape
+`[filter_height, filter_width, in_channels, out_channels]`.
+END
+ }
+ in_arg {
+ name: "out_backprop"
+ description: <<END
+4-D with shape `[batch, out_height, out_width, out_channels]`.
+Gradients w.r.t. the output of the convolution.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+4-D with shape `[batch, in_height, in_width, in_channels]`. Gradient
+w.r.t. the input of the convolution.
+END
+ }
+ attr {
+ name: "strides"
+ description: <<END
+The stride of the sliding window for each dimension of the input
+of the convolution. Must be in the same order as the dimensions specified
+with `data_format`.
+END
+ }
+ attr {
+ name: "padding"
+ description: <<END
+The type of padding algorithm to use.
+END
+ }
+ attr {
+ name: "data_format"
+ description: <<END
+Specify the data format of the input and output data. With the
+default format "NHWC", the data is stored in the order of:
+ [batch, in_height, in_width, in_channels].
+Alternatively, the format could be "NCHW", the data storage order of:
+ [batch, in_channels, in_height, in_width].
+END
+ }
+ summary: "Computes the gradients of convolution with respect to the input."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Conv3D.pbtxt b/tensorflow/core/api_def/base_api/api_def_Conv3D.pbtxt
new file mode 100644
index 0000000000..8f3cd4493c
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Conv3D.pbtxt
@@ -0,0 +1,47 @@
+op {
+ graph_op_name: "Conv3D"
+ in_arg {
+ name: "input"
+ description: <<END
+Shape `[batch, in_depth, in_height, in_width, in_channels]`.
+END
+ }
+ in_arg {
+ name: "filter"
+ description: <<END
+Shape `[filter_depth, filter_height, filter_width, in_channels,
+out_channels]`. `in_channels` must match between `input` and `filter`.
+END
+ }
+ attr {
+ name: "strides"
+ description: <<END
+1-D tensor of length 5. The stride of the sliding window for each
+dimension of `input`. Must have `strides[0] = strides[4] = 1`.
+END
+ }
+ attr {
+ name: "padding"
+ description: <<END
+The type of padding algorithm to use.
+END
+ }
+ attr {
+ name: "data_format"
+ description: <<END
+The data format of the input and output data. With the
+default format "NDHWC", the data is stored in the order of:
+ [batch, in_depth, in_height, in_width, in_channels].
+Alternatively, the format could be "NCDHW", the data storage order is:
+ [batch, in_channels, in_depth, in_height, in_width].
+END
+ }
+ summary: "Computes a 3-D convolution given 5-D `input` and `filter` tensors."
+ description: <<END
+In signal processing, cross-correlation is a measure of similarity of
+two waveforms as a function of a time-lag applied to one of them. This
+is also known as a sliding dot product or sliding inner-product.
+
+Our Conv3D implements a form of cross-correlation.
+END
+}
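A sketch of the default NDHWC case through `tf.nn.conv3d`:

```python
import tensorflow as tf

volumes = tf.random_normal([4, 16, 32, 32, 1])  # [batch, depth, h, w, channels]
filters = tf.random_normal([3, 3, 3, 1, 8])     # [fd, fh, fw, in_ch, out_ch]

y = tf.nn.conv3d(volumes, filters,
                 strides=[1, 1, 1, 1, 1], padding='SAME')
# y has shape [4, 16, 32, 32, 8].
```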
diff --git a/tensorflow/core/api_def/base_api/api_def_Conv3DBackpropFilter.pbtxt b/tensorflow/core/api_def/base_api/api_def_Conv3DBackpropFilter.pbtxt
new file mode 100644
index 0000000000..3da4a87865
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Conv3DBackpropFilter.pbtxt
@@ -0,0 +1,37 @@
+op {
+ graph_op_name: "Conv3DBackpropFilter"
+ in_arg {
+ name: "input"
+ description: <<END
+Shape `[batch, depth, rows, cols, in_channels]`.
+END
+ }
+ in_arg {
+ name: "filter"
+ description: <<END
+Shape `[depth, rows, cols, in_channels, out_channels]`.
+`in_channels` must match between `input` and `filter`.
+END
+ }
+ in_arg {
+ name: "out_backprop"
+ description: <<END
+Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
+out_channels]`.
+END
+ }
+ attr {
+ name: "strides"
+ description: <<END
+1-D tensor of length 5. The stride of the sliding window for each
+dimension of `input`. Must have `strides[0] = strides[4] = 1`.
+END
+ }
+ attr {
+ name: "padding"
+ description: <<END
+The type of padding algorithm to use.
+END
+ }
+ summary: "Computes the gradients of 3-D convolution with respect to the filter."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Conv3DBackpropFilterV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_Conv3DBackpropFilterV2.pbtxt
new file mode 100644
index 0000000000..6f9b917237
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Conv3DBackpropFilterV2.pbtxt
@@ -0,0 +1,49 @@
+op {
+ graph_op_name: "Conv3DBackpropFilterV2"
+ in_arg {
+ name: "input"
+ description: <<END
+Shape `[batch, depth, rows, cols, in_channels]`.
+END
+ }
+ in_arg {
+ name: "filter_sizes"
+ description: <<END
+An integer vector representing the tensor shape of `filter`,
+where `filter` is a 5-D
+`[filter_depth, filter_height, filter_width, in_channels, out_channels]`
+tensor.
+END
+ }
+ in_arg {
+ name: "out_backprop"
+ description: <<END
+Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
+out_channels]`.
+END
+ }
+ attr {
+ name: "strides"
+ description: <<END
+1-D tensor of length 5. The stride of the sliding window for each
+dimension of `input`. Must have `strides[0] = strides[4] = 1`.
+END
+ }
+ attr {
+ name: "padding"
+ description: <<END
+The type of padding algorithm to use.
+END
+ }
+ attr {
+ name: "data_format"
+ description: <<END
+The data format of the input and output data. With the
+default format "NDHWC", the data is stored in the order of:
+ [batch, in_depth, in_height, in_width, in_channels].
+Alternatively, the format could be "NCDHW", the data storage order is:
+ [batch, in_channels, in_depth, in_height, in_width].
+END
+ }
+ summary: "Computes the gradients of 3-D convolution with respect to the filter."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Conv3DBackpropInput.pbtxt b/tensorflow/core/api_def/base_api/api_def_Conv3DBackpropInput.pbtxt
new file mode 100644
index 0000000000..c40a9a91a5
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Conv3DBackpropInput.pbtxt
@@ -0,0 +1,37 @@
+op {
+ graph_op_name: "Conv3DBackpropInput"
+ in_arg {
+ name: "input"
+ description: <<END
+Shape `[batch, depth, rows, cols, in_channels]`.
+END
+ }
+ in_arg {
+ name: "filter"
+ description: <<END
+Shape `[depth, rows, cols, in_channels, out_channels]`.
+`in_channels` must match between `input` and `filter`.
+END
+ }
+ in_arg {
+ name: "out_backprop"
+ description: <<END
+Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
+out_channels]`.
+END
+ }
+ attr {
+ name: "strides"
+ description: <<END
+1-D tensor of length 5. The stride of the sliding window for each
+dimension of `input`. Must have `strides[0] = strides[4] = 1`.
+END
+ }
+ attr {
+ name: "padding"
+ description: <<END
+The type of padding algorithm to use.
+END
+ }
+ summary: "Computes the gradients of 3-D convolution with respect to the input."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Conv3DBackpropInputV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_Conv3DBackpropInputV2.pbtxt
new file mode 100644
index 0000000000..19aba156d5
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Conv3DBackpropInputV2.pbtxt
@@ -0,0 +1,49 @@
+op {
+ graph_op_name: "Conv3DBackpropInputV2"
+ in_arg {
+ name: "input_sizes"
+ description: <<END
+An integer vector representing the tensor shape of `input`,
+where `input` is a 5-D
+`[batch, depth, rows, cols, in_channels]` tensor.
+END
+ }
+ in_arg {
+ name: "filter"
+ description: <<END
+Shape `[depth, rows, cols, in_channels, out_channels]`.
+`in_channels` must match between `input` and `filter`.
+END
+ }
+ in_arg {
+ name: "out_backprop"
+ description: <<END
+Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
+out_channels]`.
+END
+ }
+ attr {
+ name: "strides"
+ description: <<END
+1-D tensor of length 5. The stride of the sliding window for each
+dimension of `input`. Must have `strides[0] = strides[4] = 1`.
+END
+ }
+ attr {
+ name: "padding"
+ description: <<END
+The type of padding algorithm to use.
+END
+ }
+ attr {
+ name: "data_format"
+ description: <<END
+The data format of the input and output data. With the
+default format "NDHWC", the data is stored in the order of:
+ [batch, in_depth, in_height, in_width, in_channels].
+Alternatively, the format could be "NCDHW", the data storage order is:
+ [batch, in_channels, in_depth, in_height, in_width].
+END
+ }
+ summary: "Computes the gradients of 3-D convolution with respect to the input."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Cos.pbtxt b/tensorflow/core/api_def/base_api/api_def_Cos.pbtxt
new file mode 100644
index 0000000000..43fb75836f
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Cos.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Cos"
+ summary: "Computes cos of x element-wise."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Cosh.pbtxt b/tensorflow/core/api_def/base_api/api_def_Cosh.pbtxt
new file mode 100644
index 0000000000..aaeb4ccbd5
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Cosh.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Cosh"
+ summary: "Computes hyperbolic cosine of x element-wise."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_CountUpTo.pbtxt b/tensorflow/core/api_def/base_api/api_def_CountUpTo.pbtxt
new file mode 100644
index 0000000000..e7b5e2901a
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_CountUpTo.pbtxt
@@ -0,0 +1,24 @@
+op {
+ graph_op_name: "CountUpTo"
+ in_arg {
+ name: "ref"
+ description: <<END
+Should be from a scalar `Variable` node.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+A copy of the input before increment. If nothing else modifies the
+input, the values produced will all be distinct.
+END
+ }
+ attr {
+ name: "limit"
+ description: <<END
+If incrementing ref would bring it above limit, instead generates an
+'OutOfRange' error.
+END
+ }
+ summary: "Increments \'ref\' until it reaches \'limit\'."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_CropAndResize.pbtxt b/tensorflow/core/api_def/base_api/api_def_CropAndResize.pbtxt
new file mode 100644
index 0000000000..629f575d0a
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_CropAndResize.pbtxt
@@ -0,0 +1,74 @@
+op {
+ graph_op_name: "CropAndResize"
+ in_arg {
+ name: "image"
+ description: <<END
+A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
+Both `image_height` and `image_width` need to be positive.
+END
+ }
+ in_arg {
+ name: "boxes"
+ description: <<END
+A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
+specifies the coordinates of a box in the `box_ind[i]` image and is specified
+in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of
+`y` is mapped to the image coordinate at `y * (image_height - 1)`, so the
+`[0, 1]` interval of normalized image height is mapped to
+`[0, image_height - 1]` in image height coordinates. We do allow `y1` > `y2`, in
+which case the sampled crop is an up-down flipped version of the original
+image. The width dimension is treated similarly. Normalized coordinates
+outside the `[0, 1]` range are allowed, in which case we use
+`extrapolation_value` to extrapolate the input image values.
+END
+ }
+ in_arg {
+ name: "box_ind"
+ description: <<END
+A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
+The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
+END
+ }
+ in_arg {
+ name: "crop_size"
+ description: <<END
+A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All
+cropped image patches are resized to this size. The aspect ratio of the image
+content is not preserved. Both `crop_height` and `crop_width` need to be
+positive.
+END
+ }
+ out_arg {
+ name: "crops"
+ description: <<END
+A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
+END
+ }
+ attr {
+ name: "method"
+ description: <<END
+A string specifying the interpolation method. Only 'bilinear' is
+supported for now.
+END
+ }
+ attr {
+ name: "extrapolation_value"
+ description: <<END
+Value used for extrapolation, when applicable.
+END
+ }
+ summary: "Extracts crops from the input image tensor and bilinearly resizes them (possibly"
+ description: <<END
+with aspect ratio change) to a common output size specified by `crop_size`. This
+is more general than the `crop_to_bounding_box` op which extracts a fixed size
+slice from the input image and does not allow resizing or aspect ratio change.
+
+Returns a tensor with `crops` from the input `image` at positions defined at the
+bounding box locations in `boxes`. The cropped boxes are all resized (with
+bilinear interpolation) to a fixed `size = [crop_height, crop_width]`. The
+result is a 4-D tensor `[num_boxes, crop_height, crop_width, depth]`. The
+resizing is corner aligned. In particular, if `boxes = [[0, 0, 1, 1]]`, the
+method will give identical results to using `tf.image.resize_bilinear()`
+with `align_corners=True`.
+END
+}
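A sketch through `tf.image.crop_and_resize`:

```python
import tensorflow as tf

image = tf.random_normal([1, 100, 100, 3])
boxes = tf.constant([[0.0, 0.0, 1.0, 1.0],       # the whole image
                     [0.25, 0.25, 0.75, 0.75]])  # a centered crop
box_ind = tf.constant([0, 0])                    # both boxes index image 0

crops = tf.image.crop_and_resize(image, boxes, box_ind, crop_size=[32, 32])
# crops has shape [2, 32, 32, 3].
```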
diff --git a/tensorflow/core/api_def/base_api/api_def_CropAndResizeGradBoxes.pbtxt b/tensorflow/core/api_def/base_api/api_def_CropAndResizeGradBoxes.pbtxt
new file mode 100644
index 0000000000..c03b233efc
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_CropAndResizeGradBoxes.pbtxt
@@ -0,0 +1,52 @@
+op {
+ graph_op_name: "CropAndResizeGradBoxes"
+ in_arg {
+ name: "grads"
+ description: <<END
+A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
+END
+ }
+ in_arg {
+ name: "image"
+ description: <<END
+A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
+Both `image_height` and `image_width` need to be positive.
+END
+ }
+ in_arg {
+ name: "boxes"
+ description: <<END
+A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
+specifies the coordinates of a box in the `box_ind[i]` image and is specified
+in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of
+`y` is mapped to the image coordinate at `y * (image_height - 1)`, so the
+`[0, 1]` interval of normalized image height is mapped to
+`[0, image_height - 1]` in image height coordinates. We do allow `y1` > `y2`, in
+which case the sampled crop is an up-down flipped version of the original
+image. The width dimension is treated similarly. Normalized coordinates
+outside the `[0, 1]` range are allowed, in which case we use
+`extrapolation_value` to extrapolate the input image values.
+END
+ }
+ in_arg {
+ name: "box_ind"
+ description: <<END
+A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
+The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+A 2-D tensor of shape `[num_boxes, 4]`.
+END
+ }
+ attr {
+ name: "method"
+ description: <<END
+A string specifying the interpolation method. Only 'bilinear' is
+supported for now.
+END
+ }
+ summary: "Computes the gradient of the crop_and_resize op wrt the input boxes tensor."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_CropAndResizeGradImage.pbtxt b/tensorflow/core/api_def/base_api/api_def_CropAndResizeGradImage.pbtxt
new file mode 100644
index 0000000000..51fb810007
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_CropAndResizeGradImage.pbtxt
@@ -0,0 +1,53 @@
+op {
+ graph_op_name: "CropAndResizeGradImage"
+ in_arg {
+ name: "grads"
+ description: <<END
+A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
+END
+ }
+ in_arg {
+ name: "boxes"
+ description: <<END
+A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
+specifies the coordinates of a box in the `box_ind[i]` image and is specified
+in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of
+`y` is mapped to the image coordinate at `y * (image_height - 1)`, so the
+`[0, 1]` interval of normalized image height is mapped to
+`[0, image_height - 1]` in image height coordinates. We do allow `y1` > `y2`, in
+which case the sampled crop is an up-down flipped version of the original
+image. The width dimension is treated similarly. Normalized coordinates
+outside the `[0, 1]` range are allowed, in which case we use
+`extrapolation_value` to extrapolate the input image values.
+END
+ }
+ in_arg {
+ name: "box_ind"
+ description: <<END
+A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
+The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
+END
+ }
+ in_arg {
+ name: "image_size"
+ description: <<END
+A 1-D tensor with value `[batch, image_height, image_width, depth]`
+containing the original image size. Both `image_height` and `image_width` need
+to be positive.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
+END
+ }
+ attr {
+ name: "method"
+ description: <<END
+A string specifying the interpolation method. Only 'bilinear' is
+supported for now.
+END
+ }
+ summary: "Computes the gradient of the crop_and_resize op wrt the input image tensor."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Cross.pbtxt b/tensorflow/core/api_def/base_api/api_def_Cross.pbtxt
new file mode 100644
index 0000000000..26c12e459b
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Cross.pbtxt
@@ -0,0 +1,27 @@
+op {
+ graph_op_name: "Cross"
+ in_arg {
+ name: "a"
+ description: <<END
+A tensor containing 3-element vectors.
+END
+ }
+ in_arg {
+ name: "b"
+ description: <<END
+Another tensor, of same type and shape as `a`.
+END
+ }
+ out_arg {
+ name: "product"
+ description: <<END
+Pairwise cross product of the vectors in `a` and `b`.
+END
+ }
+ summary: "Compute the pairwise cross product."
+ description: <<END
+`a` and `b` must be the same shape; they can either be simple 3-element vectors,
+or any shape where the innermost dimension is 3. In the latter case, each pair
+of corresponding 3-element vectors is cross-multiplied independently.
+END
+}
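For example, via `tf.cross`:

```python
import tensorflow as tf

a = tf.constant([[1.0, 0.0, 0.0],
                 [0.0, 1.0, 0.0]])
b = tf.constant([[0.0, 1.0, 0.0],
                 [0.0, 0.0, 1.0]])

p = tf.cross(a, b)
# p ==> [[0, 0, 1],
#        [1, 0, 0]]  (each row cross-multiplied independently)
```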
diff --git a/tensorflow/core/api_def/base_api/api_def_Cumprod.pbtxt b/tensorflow/core/api_def/base_api/api_def_Cumprod.pbtxt
new file mode 100644
index 0000000000..96e599365a
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Cumprod.pbtxt
@@ -0,0 +1,61 @@
+op {
+ graph_op_name: "Cumprod"
+ in_arg {
+ name: "x"
+ description: <<END
+A `Tensor`. Must be one of the following types: `float32`, `float64`,
+`int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
+`complex128`, `qint8`, `quint8`, `qint32`, `half`.
+END
+ }
+ in_arg {
+ name: "axis"
+ description: <<END
+A `Tensor` of type `int32` (default: 0). Must be in the range
+`[-rank(x), rank(x))`.
+END
+ }
+ attr {
+ name: "exclusive"
+ description: <<END
+If `True`, perform exclusive cumprod.
+END
+ }
+ attr {
+ name: "reverse"
+ description: <<END
+A `bool` (default: False).
+END
+ }
+ summary: "Compute the cumulative product of the tensor `x` along `axis`."
+ description: <<END
+By default, this op performs an inclusive cumprod, which means that the first
+element of the input is identical to the first element of the output:
+
+```python
+tf.cumprod([a, b, c]) # => [a, a * b, a * b * c]
+```
+
+By setting the `exclusive` kwarg to `True`, an exclusive cumprod is
+performed instead:
+
+```python
+tf.cumprod([a, b, c], exclusive=True) # => [1, a, a * b]
+```
+
+By setting the `reverse` kwarg to `True`, the cumprod is performed in the
+opposite direction:
+
+```python
+tf.cumprod([a, b, c], reverse=True) # => [a * b * c, b * c, c]
+```
+
+This is more efficient than using separate `tf.reverse` ops.
+
+The `reverse` and `exclusive` kwargs can also be combined:
+
+```python
+tf.cumprod([a, b, c], exclusive=True, reverse=True) # => [b * c, c, 1]
+```
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Cumsum.pbtxt b/tensorflow/core/api_def/base_api/api_def_Cumsum.pbtxt
new file mode 100644
index 0000000000..6267f0dfa2
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Cumsum.pbtxt
@@ -0,0 +1,61 @@
+op {
+ graph_op_name: "Cumsum"
+ in_arg {
+ name: "x"
+ description: <<END
+A `Tensor`. Must be one of the following types: `float32`, `float64`,
+`int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
+`complex128`, `qint8`, `quint8`, `qint32`, `half`.
+END
+ }
+ in_arg {
+ name: "axis"
+ description: <<END
+A `Tensor` of type `int32` (default: 0). Must be in the range
+`[-rank(x), rank(x))`.
+END
+ }
+ attr {
+ name: "exclusive"
+ description: <<END
+If `True`, perform exclusive cumsum.
+END
+ }
+ attr {
+ name: "reverse"
+ description: <<END
+A `bool` (default: False).
+END
+ }
+ summary: "Compute the cumulative sum of the tensor `x` along `axis`."
+ description: <<END
+By default, this op performs an inclusive cumsum, which means that the first
+element of the input is identical to the first element of the output:
+
+```python
+tf.cumsum([a, b, c]) # => [a, a + b, a + b + c]
+```
+
+By setting the `exclusive` kwarg to `True`, an exclusive cumsum is
+performed instead:
+
+```python
+tf.cumsum([a, b, c], exclusive=True) # => [0, a, a + b]
+```
+
+By setting the `reverse` kwarg to `True`, the cumsum is performed in the
+opposite direction:
+
+```python
+tf.cumsum([a, b, c], reverse=True) # => [a + b + c, b + c, c]
+```
+
+This is more efficient than using separate `tf.reverse` ops.
+
+The `reverse` and `exclusive` kwargs can also be combined:
+
+```python
+tf.cumsum([a, b, c], exclusive=True, reverse=True) # => [b + c, c, 0]
+```
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_D.pbtxt b/tensorflow/core/api_def/base_api/api_def_D.pbtxt
deleted file mode 100644
index ff8a7223c7..0000000000
--- a/tensorflow/core/api_def/base_api/api_def_D.pbtxt
+++ /dev/null
@@ -1,790 +0,0 @@
-op {
- graph_op_name: "DebugGradientIdentity"
- endpoint {
- name: "DebugGradientIdentity"
- }
- summary: "Identity op for gradient debugging."
- description: <<END
-This op is hidden from public in Python. It is used by TensorFlow Debugger to
-register gradient tensors for gradient debugging.
-END
-}
-op {
- graph_op_name: "DecodeAndCropJpeg"
- endpoint {
- name: "DecodeAndCropJpeg"
- }
- summary: "Decode and Crop a JPEG-encoded image to a uint8 tensor."
- description: <<END
-The attr `channels` indicates the desired number of color channels for the
-decoded image.
-
-Accepted values are:
-
-* 0: Use the number of channels in the JPEG-encoded image.
-* 1: output a grayscale image.
-* 3: output an RGB image.
-
-If needed, the JPEG-encoded image is transformed to match the requested number
-of color channels.
-
-The attr `ratio` allows downscaling the image by an integer factor during
-decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than
-downscaling the image later.
-
-
-It is equivalent to a combination of decode and crop, but much faster by only
-decoding partial jpeg image.
-END
-}
-op {
- graph_op_name: "DecodeBase64"
- endpoint {
- name: "DecodeBase64"
- }
- summary: "Decode web-safe base64-encoded strings."
- description: <<END
-Input may or may not have padding at the end. See EncodeBase64 for padding.
-Web-safe means that input must use - and _ instead of + and /.
-END
-}
-op {
- graph_op_name: "DecodeBmp"
- endpoint {
- name: "DecodeBmp"
- }
- summary: "Decode the first frame of a BMP-encoded image to a uint8 tensor."
- description: <<END
-The attr `channels` indicates the desired number of color channels for the
-decoded image.
-
-Accepted values are:
-
-* 0: Use the number of channels in the BMP-encoded image.
-* 3: output an RGB image.
-* 4: output an RGBA image.
-END
-}
-op {
- graph_op_name: "DecodeCSV"
- endpoint {
- name: "DecodeCSV"
- }
- summary: "Convert CSV records to tensors. Each column maps to one tensor."
- description: <<END
-RFC 4180 format is expected for the CSV records.
-(https://tools.ietf.org/html/rfc4180)
-Note that we allow leading and trailing spaces with int or float field.
-END
-}
-op {
- graph_op_name: "DecodeGif"
- endpoint {
- name: "DecodeGif"
- }
- summary: "Decode the first frame of a GIF-encoded image to a uint8 tensor."
- description: <<END
-GIF with frame or transparency compression are not supported
-convert animated GIF from compressed to uncompressed by:
-
- convert $src.gif -coalesce $dst.gif
-
-This op also supports decoding JPEGs and PNGs, though it is cleaner to use
-`tf.image.decode_image`.
-END
-}
-op {
- graph_op_name: "DecodeJSONExample"
- endpoint {
- name: "DecodeJSONExample"
- }
- summary: "Convert JSON-encoded Example records to binary protocol buffer strings."
- description: <<END
-This op translates a tensor containing Example records, encoded using
-the [standard JSON
-mapping](https://developers.google.com/protocol-buffers/docs/proto3#json),
-into a tensor containing the same records encoded as binary protocol
-buffers. The resulting tensor can then be fed to any of the other
-Example-parsing ops.
-END
-}
-op {
- graph_op_name: "DecodeJpeg"
- endpoint {
- name: "DecodeJpeg"
- }
- summary: "Decode a JPEG-encoded image to a uint8 tensor."
- description: <<END
-The attr `channels` indicates the desired number of color channels for the
-decoded image.
-
-Accepted values are:
-
-* 0: Use the number of channels in the JPEG-encoded image.
-* 1: output a grayscale image.
-* 3: output an RGB image.
-
-If needed, the JPEG-encoded image is transformed to match the requested number
-of color channels.
-
-The attr `ratio` allows downscaling the image by an integer factor during
-decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than
-downscaling the image later.
-
-
-This op also supports decoding PNGs and non-animated GIFs since the interface is
-the same, though it is cleaner to use `tf.image.decode_image`.
-END
-}
-op {
- graph_op_name: "DecodePng"
- endpoint {
- name: "DecodePng"
- }
- summary: "Decode a PNG-encoded image to a uint8 or uint16 tensor."
- description: <<END
-The attr `channels` indicates the desired number of color channels for the
-decoded image.
-
-Accepted values are:
-
-* 0: Use the number of channels in the PNG-encoded image.
-* 1: output a grayscale image.
-* 3: output an RGB image.
-* 4: output an RGBA image.
-
-If needed, the PNG-encoded image is transformed to match the requested number
-of color channels.
-
-This op also supports decoding JPEGs and non-animated GIFs since the interface
-is the same, though it is cleaner to use `tf.image.decode_image`.
-END
-}
-op {
- graph_op_name: "DecodeRaw"
- endpoint {
- name: "DecodeRaw"
- }
- summary: "Reinterpret the bytes of a string as a vector of numbers."
-}
-op {
- graph_op_name: "DecodeWav"
- endpoint {
- name: "DecodeWav"
- }
- summary: "Decode a 16-bit PCM WAV file to a float tensor."
- description: <<END
-The -32768 to 32767 signed 16-bit values will be scaled to -1.0 to 1.0 in float.
-
-When desired_channels is set, if the input contains fewer channels than this
-then the last channel will be duplicated to give the requested number, else if
-the input has more channels than requested then the additional channels will be
-ignored.
-
-If desired_samples is set, then the audio will be cropped or padded with zeroes
-to the requested length.
-
-The first output contains a Tensor with the content of the audio samples. The
-lowest dimension will be the number of channels, and the second will be the
-number of samples. For example, a ten-sample-long stereo WAV file should give an
-output shape of [10, 2].
-END
-}
-op {
- graph_op_name: "DeleteSessionTensor"
- endpoint {
- name: "DeleteSessionTensor"
- }
- summary: "Delete the tensor specified by its handle in the session."
-}
-op {
- graph_op_name: "DenseToDenseSetOperation"
- endpoint {
- name: "DenseToDenseSetOperation"
- }
- summary: "Applies set operation along last dimension of 2 `Tensor` inputs."
- description: <<END
-See SetOperationOp::SetOperationFromContext for values of `set_operation`.
-
-Output `result` is a `SparseTensor` represented by `result_indices`,
-`result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this
-has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`
-dimension contains the result of `set_operation` applied to the corresponding
-`[0...n-1]` dimension of `set`.
-END
-}
-op {
- graph_op_name: "DenseToSparseBatchDataset"
- endpoint {
- name: "DenseToSparseBatchDataset"
- }
- summary: "Creates a dataset that yields a SparseTensor for each element of the input."
-}
-op {
- graph_op_name: "DenseToSparseSetOperation"
- endpoint {
- name: "DenseToSparseSetOperation"
- }
- summary: "Applies set operation along last dimension of `Tensor` and `SparseTensor`."
- description: <<END
-See SetOperationOp::SetOperationFromContext for values of `set_operation`.
-
-Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`,
-and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same
-as `set1`. Dimension `n` contains values in a set, duplicates are allowed but
-ignored.
-
-If `validate_indices` is `True`, this op validates the order and range of `set2`
-indices.
-
-Output `result` is a `SparseTensor` represented by `result_indices`,
-`result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this
-has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`
-dimension contains the result of `set_operation` applied to the corresponding
-`[0...n-1]` dimension of `set`.
-END
-}
-op {
- graph_op_name: "DepthToSpace"
- endpoint {
- name: "DepthToSpace"
- }
- summary: "DepthToSpace for tensors of type T."
- description: <<END
-Rearranges data from depth into blocks of spatial data.
-This is the reverse transformation of SpaceToDepth. More specifically,
-this op outputs a copy of the input tensor where values from the `depth`
-dimension are moved in spatial blocks to the `height` and `width` dimensions.
-The attr `block_size` indicates the input block size and how the data is moved.
-
- * Chunks of data of size `block_size * block_size` from depth are rearranged
- into non-overlapping blocks of size `block_size x block_size`
- * The width the output tensor is `input_depth * block_size`, whereas the
- height is `input_height * block_size`.
- * The Y, X coordinates within each block of the output image are determined
- by the high order component of the input channel index.
- * The depth of the input tensor must be divisible by
- `block_size * block_size`.
-
-The `data_format` attr specifies the layout of the input and output tensors
-with the following options:
- "NHWC": `[ batch, height, width, channels ]`
- "NCHW": `[ batch, channels, height, width ]`
- "NCHW_VECT_C":
- `qint8 [ batch, channels / 4, height, width, channels % 4 ]`
-
-It is useful to consider the operation as transforming a 6-D Tensor.
-e.g. for data_format = NHWC,
- Each element in the input tensor can be specified via 6 coordinates,
- ordered by decreasing memory layout significance as:
- n,iY,iX,bY,bX,oC (where n=batch index, iX, iY means X or Y coordinates
- within the input image, bX, bY means coordinates
- within the output block, oC means output channels).
- The output would be the input transposed to the following layout:
- n,iY,bY,iX,bX,oC
-
-This operation is useful for resizing the activations between convolutions
-(but keeping all data), e.g. instead of pooling. It is also useful for training
-purely convolutional models.
-
-For example, given an input of shape `[1, 1, 1, 4]`, data_format = "NHWC" and
-block_size = 2:
-
-```
-x = [[[[1, 2, 3, 4]]]]
-
-```
-
-This operation will output a tensor of shape `[1, 2, 2, 1]`:
-
-```
- [[[[1], [2]],
- [[3], [4]]]]
-```
-
-Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`,
-the corresponding output will have 2x2 elements and will have a depth of
-1 channel (1 = `4 / (block_size * block_size)`).
-The output element shape is `[2, 2, 1]`.
-
-For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g.
-
-```
-x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
-```
-
-This operation, for block size of 2, will return the following tensor of shape
-`[1, 2, 2, 3]`
-
-```
- [[[[1, 2, 3], [4, 5, 6]],
- [[7, 8, 9], [10, 11, 12]]]]
-
-```
-
-Similarly, for the following input of shape `[1, 2, 2, 4]`, and a block size of 2:
-
-```
-x = [[[[1, 2, 3, 4],
- [5, 6, 7, 8]],
- [[9, 10, 11, 12],
- [13, 14, 15, 16]]]]
-```
-
-the operator will return the following tensor of shape `[1, 4, 4, 1]`:
-
-```
-x = [[[ [1], [2], [5], [6]],
- [ [3], [4], [7], [8]],
- [ [9], [10], [13], [14]],
- [ [11], [12], [15], [16]]]]
-
-```
-END
-}
-op {
- graph_op_name: "DepthwiseConv2dNative"
- endpoint {
- name: "DepthwiseConv2dNative"
- }
- summary: "Computes a 2-D depthwise convolution given 4-D `input` and `filter` tensors."
- description: <<END
-Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
-and a filter / kernel tensor of shape
-`[filter_height, filter_width, in_channels, channel_multiplier]`, containing
-`in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies
-a different filter to each input channel (expanding from 1 channel to
-`channel_multiplier` channels for each), then concatenates the results
-together. Thus, the output has `in_channels * channel_multiplier` channels.
-
-```
-for k in 0..in_channels-1
- for q in 0..channel_multiplier-1
- output[b, i, j, k * channel_multiplier + q] =
- sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *
- filter[di, dj, k, q]
-```
-
-Must have `strides[0] = strides[3] = 1`. For the most common case of the same
-horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
-END
-}
-op {
- graph_op_name: "DepthwiseConv2dNativeBackpropFilter"
- endpoint {
- name: "DepthwiseConv2dNativeBackpropFilter"
- }
- summary: "Computes the gradients of depthwise convolution with respect to the filter."
-}
-op {
- graph_op_name: "DepthwiseConv2dNativeBackpropInput"
- endpoint {
- name: "DepthwiseConv2dNativeBackpropInput"
- }
- summary: "Computes the gradients of depthwise convolution with respect to the input."
-}
-op {
- graph_op_name: "Dequantize"
- endpoint {
- name: "Dequantize"
- }
- summary: "Dequantize the \'input\' tensor into a float Tensor."
- description: <<END
-[min_range, max_range] are scalar floats that specify the range for
-the 'input' data. The 'mode' attribute controls exactly which calculations are
-used to convert the float values to their quantized equivalents.
-
-In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:
-
-```
-if T == qint8, in[i] += (range(T) + 1)/ 2.0
-out[i] = min_range + (in[i]* (max_range - min_range) / range(T))
-```
-here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`
-
-*MIN_COMBINED Mode Example*
-
-If the input comes from a QuantizedRelu6, the output type is
-quint8 (range of 0-255) but the possible range of QuantizedRelu6 is
-0-6. The min_range and max_range values are therefore 0.0 and 6.0.
-Dequantize on quint8 will take each value, cast to float, and multiply
-by 6 / 255.
-Note that if the quantized type is qint8, the operation will additionally add
-128 to each value before casting.
-
-If the mode is 'MIN_FIRST', then this approach is used:
-
-```c++
-number_of_steps = 1 << (# of bits in T)
-range_adjust = number_of_steps / (number_of_steps - 1)
-range = (range_max - range_min) * range_adjust
-range_scale = range / number_of_steps
-const double offset_input = static_cast<double>(input) - lowest_quantized;
-result = range_min + ((input - numeric_limits<T>::min()) * range_scale)
-```
-
-*SCALED mode Example*
-
-`SCALED` mode matches the quantization approach used in
-`QuantizeAndDequantize{V2|V3}`.
-
-If the mode is `SCALED`, we do not use the full range of the output type,
-choosing to elide the lowest possible value for symmetry (e.g., output range is
--127 to 127, not -128 to 127 for signed 8 bit quantization), so that 0.0 maps to
-0.
-
-We first find the range of values in our tensor. The
-range we use is always centered on 0, so we find m such that
-```c++
- m = max(abs(input_min), abs(input_max))
-```
-
-Our input tensor range is then `[-m, m]`.
-
-Next, we choose our fixed-point quantization buckets, `[min_fixed, max_fixed]`.
-If T is signed, this is
-```
- num_bits = sizeof(T) * 8
- [min_fixed, max_fixed] =
- [-(1 << (num_bits - 1) - 1), (1 << (num_bits - 1)) - 1]
-```
-
-Otherwise, if T is unsigned, the fixed-point range is
-```
- [min_fixed, max_fixed] = [0, (1 << num_bits) - 1]
-```
-
-From this we compute our scaling factor, s:
-```c++
- s = (2 * m) / (max_fixed - min_fixed)
-```
-
-Now we can dequantize the elements of our tensor:
-```c++
-result = input * s
-```
-END
-}
-op {
- graph_op_name: "DeserializeManySparse"
- endpoint {
- name: "DeserializeManySparse"
- }
- summary: "Deserialize and concatenate `SparseTensors` from a serialized minibatch."
- description: <<END
-The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where
-`N` is the minibatch size and the rows correspond to packed outputs of
-`SerializeSparse`. The ranks of the original `SparseTensor` objects
-must all match. When the final `SparseTensor` is created, it has rank one
-higher than the ranks of the incoming `SparseTensor` objects
-(they have been concatenated along a new row dimension).
-
-The output `SparseTensor` object's shape values for all dimensions but the
-first are the max across the input `SparseTensor` objects' shape values
-for the corresponding dimensions. Its first shape value is `N`, the minibatch
-size.
-
-The input `SparseTensor` objects' indices are assumed to be ordered in
-standard lexicographic order. If this is not the case, run `SparseReorder`
-after this step to restore index ordering.
-
-For example, if the serialized input is a `[2 x 3]` matrix representing two
-original `SparseTensor` objects:
-
- index = [ 0]
- [10]
- [20]
- values = [1, 2, 3]
- shape = [50]
-
-and
-
- index = [ 2]
- [10]
- values = [4, 5]
- shape = [30]
-
-then the final deserialized `SparseTensor` will be:
-
- index = [0 0]
- [0 10]
- [0 20]
- [1 2]
- [1 10]
- values = [1, 2, 3, 4, 5]
- shape = [2 50]
-END
-}
-op {
- graph_op_name: "DestroyTemporaryVariable"
- endpoint {
- name: "DestroyTemporaryVariable"
- }
- summary: "Destroys the temporary variable and returns its final value."
- description: <<END
-Sets output to the value of the Tensor pointed to by 'ref', then destroys
-the temporary variable called 'var_name'.
-All other uses of 'ref' *must* have executed before this op.
-This is typically achieved by chaining the ref through each assign op, or by
-using control dependencies.
-
-Outputs the final value of the tensor pointed to by 'ref'.
-END
-}
-op {
- graph_op_name: "Diag"
- endpoint {
- name: "Diag"
- }
-  summary: "Returns a diagonal tensor with given diagonal values."
- description: <<END
-Given a `diagonal`, this operation returns a tensor with the `diagonal` and
-everything else padded with zeros. The diagonal is computed as follows:
-
-Assume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of
-rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where:
-
-`output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else.
-
-For example:
-
-```
-# 'diagonal' is [1, 2, 3, 4]
-tf.diag(diagonal) ==> [[1, 0, 0, 0]
- [0, 2, 0, 0]
- [0, 0, 3, 0]
- [0, 0, 0, 4]]
-```
-END
-}
-op {
- graph_op_name: "DiagPart"
- endpoint {
- name: "DiagPart"
- }
- summary: "Returns the diagonal part of the tensor."
- description: <<END
-This operation returns a tensor with the `diagonal` part
-of the `input`. The `diagonal` part is computed as follows:
-
-Assume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a
-tensor of rank `k` with dimensions `[D1,..., Dk]` where:
-
-`diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`.
-
-For example:
-
-```
-# 'input' is [[1, 0, 0, 0]
- [0, 2, 0, 0]
- [0, 0, 3, 0]
- [0, 0, 0, 4]]
-
-tf.diag_part(input) ==> [1, 2, 3, 4]
-```
-END
-}
-op {
- graph_op_name: "Digamma"
- endpoint {
- name: "Digamma"
- }
- summary: "Computes Psi, the derivative of Lgamma (the log of the absolute value of"
- description: <<END
-`Gamma(x)`), element-wise.
-END
-}
-op {
- graph_op_name: "Dilation2D"
- endpoint {
- name: "Dilation2D"
- }
- summary: "Computes the grayscale dilation of 4-D `input` and 3-D `filter` tensors."
- description: <<END
-The `input` tensor has shape `[batch, in_height, in_width, depth]` and the
-`filter` tensor has shape `[filter_height, filter_width, depth]`, i.e., each
-input channel is processed independently of the others with its own structuring
-function. The `output` tensor has shape
-`[batch, out_height, out_width, depth]`. The spatial dimensions of the output
-tensor depend on the `padding` algorithm. We currently only support the default
-"NHWC" `data_format`.
-
-In detail, the grayscale morphological 2-D dilation is the max-sum correlation
-(for consistency with `conv2d`, we use unmirrored filters):
-
- output[b, y, x, c] =
- max_{dy, dx} input[b,
- strides[1] * y + rates[1] * dy,
- strides[2] * x + rates[2] * dx,
- c] +
- filter[dy, dx, c]
-
-Max-pooling is a special case when the filter has size equal to the pooling
-kernel size and contains all zeros.
-
-Note on duality: The dilation of `input` by the `filter` is equal to the
-negation of the erosion of `-input` by the reflected `filter`.
-END
-}
-op {
- graph_op_name: "Dilation2DBackpropFilter"
- endpoint {
- name: "Dilation2DBackpropFilter"
- }
- summary: "Computes the gradient of morphological 2-D dilation with respect to the filter."
-}
-op {
- graph_op_name: "Dilation2DBackpropInput"
- endpoint {
- name: "Dilation2DBackpropInput"
- }
- summary: "Computes the gradient of morphological 2-D dilation with respect to the input."
-}
-op {
- graph_op_name: "Div"
- endpoint {
- name: "Div"
- }
- summary: "Returns x / y element-wise."
- description: <<END
-*NOTE*: `Div` supports broadcasting. More about broadcasting
-[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-END
-}
-op {
- graph_op_name: "DrawBoundingBoxes"
- endpoint {
- name: "DrawBoundingBoxes"
- }
- summary: "Draw bounding boxes on a batch of images."
- description: <<END
-Outputs a copy of `images` but draws on top of the pixels zero or more bounding
-boxes specified by the locations in `boxes`. The coordinates of the each
-bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. The
-bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
-height of the underlying image.
-
-For example, if an image is 100 x 200 pixels (height x width) and the bounding
-box is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of
-the bounding box will be `(40, 10)` to `(180, 50)` (in (x,y) coordinates).
-
-Parts of the bounding box may fall outside the image.
-END
-}
-op {
- graph_op_name: "DynamicPartition"
- endpoint {
- name: "DynamicPartition"
- }
- summary: "Partitions `data` into `num_partitions` tensors using indices from `partitions`."
- description: <<END
-For each index tuple `js` of size `partitions.ndim`, the slice `data[js, ...]`
-becomes part of `outputs[partitions[js]]`. The slices with `partitions[js] = i`
-are placed in `outputs[i]` in lexicographic order of `js`, and the first
-dimension of `outputs[i]` is the number of entries in `partitions` equal to `i`.
-In detail,
-
-```python
- outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:]
-
- outputs[i] = pack([data[js, ...] for js if partitions[js] == i])
-```
-
-`data.shape` must start with `partitions.shape`.
-
-For example:
-
-```python
- # Scalar partitions.
- partitions = 1
- num_partitions = 2
- data = [10, 20]
- outputs[0] = [] # Empty with shape [0, 2]
- outputs[1] = [[10, 20]]
-
- # Vector partitions.
- partitions = [0, 0, 1, 1, 0]
- num_partitions = 2
- data = [10, 20, 30, 40, 50]
- outputs[0] = [10, 20, 50]
- outputs[1] = [30, 40]
-```
-
-See `dynamic_stitch` for an example on how to merge partitions back.
-
-<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
-<img style="width:100%" src="https://www.tensorflow.org/images/DynamicPartition.png" alt>
-</div>
-END
-}
-op {
- graph_op_name: "DynamicStitch"
- endpoint {
- name: "DynamicStitch"
- }
- summary: "Interleave the values from the `data` tensors into a single tensor."
- description: <<END
-Builds a merged tensor such that
-
-```python
- merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
-```
-
-For example, if each `indices[m]` is scalar or vector, we have
-
-```python
- # Scalar indices:
- merged[indices[m], ...] = data[m][...]
-
- # Vector indices:
- merged[indices[m][i], ...] = data[m][i, ...]
-```
-
-Each `data[i].shape` must start with the corresponding `indices[i].shape`,
-and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we
-must have `data[i].shape = indices[i].shape + constant`. In terms of this
-`constant`, the output shape is
-
-    merged.shape = [max(indices) + 1] + constant
-
-Values are merged in order, so if an index appears in both `indices[m][i]` and
-`indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in the
-merged result. If you do not need this guarantee, ParallelDynamicStitch might
-perform better on some devices.
-
-For example:
-
-```python
- indices[0] = 6
- indices[1] = [4, 1]
- indices[2] = [[5, 2], [0, 3]]
- data[0] = [61, 62]
- data[1] = [[41, 42], [11, 12]]
- data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
- merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
- [51, 52], [61, 62]]
-```
-
-This method can be used to merge partitions created by `dynamic_partition`
-as illustrated on the following example:
-
-```python
-  # Apply a function (increment x_i) to elements for which a certain
-  # condition applies (x_i != -1 in this example).
-  x = tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
-  condition_mask = tf.not_equal(x, tf.constant(-1.))
-  partitioned_data = tf.dynamic_partition(
-      x, tf.cast(condition_mask, tf.int32), 2)
-  partitioned_data[1] = partitioned_data[1] + 1.0
-  condition_indices = tf.dynamic_partition(
-      tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32), 2)
- x = tf.dynamic_stitch(condition_indices, partitioned_data)
- # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain
- # unchanged.
-```
-
-<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
-<img style="width:100%" src="https://www.tensorflow.org/images/DynamicStitch.png" alt>
-</div>
-END
-}
diff --git a/tensorflow/core/api_def/base_api/api_def_DatasetToSingleElement.pbtxt b/tensorflow/core/api_def/base_api/api_def_DatasetToSingleElement.pbtxt
new file mode 100644
index 0000000000..2b9dffd883
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_DatasetToSingleElement.pbtxt
@@ -0,0 +1,16 @@
+op {
+ graph_op_name: "DatasetToSingleElement"
+ in_arg {
+ name: "dataset"
+ description: <<END
+A handle to a dataset that contains a single element.
+END
+ }
+ out_arg {
+ name: "components"
+ description: <<END
+The components of the single element of `input`.
+END
+ }
+ summary: "Outputs the single element from the given dataset."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_DebugGradientIdentity.pbtxt b/tensorflow/core/api_def/base_api/api_def_DebugGradientIdentity.pbtxt
new file mode 100644
index 0000000000..38fd6877e9
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_DebugGradientIdentity.pbtxt
@@ -0,0 +1,8 @@
+op {
+ graph_op_name: "DebugGradientIdentity"
+ summary: "Identity op for gradient debugging."
+ description: <<END
+This op is hidden from public in Python. It is used by TensorFlow Debugger to
+register gradient tensors for gradient debugging.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_DecodeAndCropJpeg.pbtxt b/tensorflow/core/api_def/base_api/api_def_DecodeAndCropJpeg.pbtxt
new file mode 100644
index 0000000000..28318274f3
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_DecodeAndCropJpeg.pbtxt
@@ -0,0 +1,86 @@
+op {
+ graph_op_name: "DecodeAndCropJpeg"
+ in_arg {
+ name: "contents"
+ description: <<END
+0-D. The JPEG-encoded image.
+END
+ }
+ in_arg {
+ name: "crop_window"
+ description: <<END
+1-D. The crop window: [crop_y, crop_x, crop_height, crop_width].
+END
+ }
+ out_arg {
+ name: "image"
+ description: <<END
+3-D with shape `[height, width, channels]`.
+END
+ }
+ attr {
+ name: "channels"
+ description: <<END
+Number of color channels for the decoded image.
+END
+ }
+ attr {
+ name: "ratio"
+ description: <<END
+Downscaling ratio.
+END
+ }
+ attr {
+ name: "fancy_upscaling"
+ description: <<END
+If true, uses a slower but nicer upscaling of the
+chroma planes (yuv420/422 only).
+END
+ }
+ attr {
+ name: "try_recover_truncated"
+ description: <<END
+If true, tries to recover an image from truncated input.
+END
+ }
+ attr {
+ name: "acceptable_fraction"
+ description: <<END
+The minimum required fraction of lines before a truncated
+input is accepted.
+END
+ }
+ attr {
+ name: "dct_method"
+ description: <<END
+String specifying a hint about the algorithm to use for decompression.
+Defaults to "", which maps to a system-specific default. Currently valid
+values are ["INTEGER_FAST", "INTEGER_ACCURATE"]. The hint may be ignored
+(e.g., if the internal jpeg library is changed to a version that does not
+have that specific option).
+END
+ }
+ summary: "Decode and Crop a JPEG-encoded image to a uint8 tensor."
+ description: <<END
+The attr `channels` indicates the desired number of color channels for the
+decoded image.
+
+Accepted values are:
+
+* 0: Use the number of channels in the JPEG-encoded image.
+* 1: output a grayscale image.
+* 3: output an RGB image.
+
+If needed, the JPEG-encoded image is transformed to match the requested number
+of color channels.
+
+The attr `ratio` allows downscaling the image by an integer factor during
+decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than
+downscaling the image later.
+
+
+It is equivalent to a combination of decode and crop, but much faster: only
+the part of the JPEG image inside the crop window is decoded.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_DecodeBase64.pbtxt b/tensorflow/core/api_def/base_api/api_def_DecodeBase64.pbtxt
new file mode 100644
index 0000000000..6bae3a62d7
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_DecodeBase64.pbtxt
@@ -0,0 +1,20 @@
+op {
+ graph_op_name: "DecodeBase64"
+ in_arg {
+ name: "input"
+ description: <<END
+Base64 strings to decode.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Decoded strings.
+END
+ }
+ summary: "Decode web-safe base64-encoded strings."
+ description: <<END
+Input may or may not have padding at the end. See EncodeBase64 for padding.
+Web-safe means that input must use - and _ instead of + and /.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_DecodeBmp.pbtxt b/tensorflow/core/api_def/base_api/api_def_DecodeBmp.pbtxt
new file mode 100644
index 0000000000..3c6918e6a0
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_DecodeBmp.pbtxt
@@ -0,0 +1,26 @@
+op {
+ graph_op_name: "DecodeBmp"
+ in_arg {
+ name: "contents"
+ description: <<END
+0-D. The BMP-encoded image.
+END
+ }
+ out_arg {
+ name: "image"
+ description: <<END
+3-D with shape `[height, width, channels]`. RGB order
+END
+ }
+ summary: "Decode the first frame of a BMP-encoded image to a uint8 tensor."
+ description: <<END
+The attr `channels` indicates the desired number of color channels for the
+decoded image.
+
+Accepted values are:
+
+* 0: Use the number of channels in the BMP-encoded image.
+* 3: output an RGB image.
+* 4: output an RGBA image.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_DecodeCSV.pbtxt b/tensorflow/core/api_def/base_api/api_def_DecodeCSV.pbtxt
new file mode 100644
index 0000000000..e39213cbc7
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_DecodeCSV.pbtxt
@@ -0,0 +1,49 @@
+op {
+ graph_op_name: "DecodeCSV"
+ in_arg {
+ name: "records"
+ description: <<END
+Each string is a record/row in the csv and all records should have
+the same format.
+END
+ }
+ in_arg {
+ name: "record_defaults"
+ description: <<END
+One tensor per column of the input record, with either a
+scalar default value for that column or empty if the column is required.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Each tensor will have the same shape as records.
+END
+ }
+ attr {
+ name: "field_delim"
+ description: <<END
+char delimiter to separate fields in a record.
+END
+ }
+ attr {
+ name: "use_quote_delim"
+ description: <<END
+If false, treats double quotation marks as regular
+characters inside of the string fields (ignoring RFC 4180, Section 2,
+Bullet 5).
+END
+ }
+ attr {
+ name: "na_value"
+ description: <<END
+Additional string to recognize as NA/NaN.
+END
+ }
+ summary: "Convert CSV records to tensors. Each column maps to one tensor."
+ description: <<END
+RFC 4180 format is expected for the CSV records.
+(https://tools.ietf.org/html/rfc4180)
+Note that we allow leading and trailing spaces in int and float fields.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_DecodeGif.pbtxt b/tensorflow/core/api_def/base_api/api_def_DecodeGif.pbtxt
new file mode 100644
index 0000000000..a90b734155
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_DecodeGif.pbtxt
@@ -0,0 +1,25 @@
+op {
+ graph_op_name: "DecodeGif"
+ in_arg {
+ name: "contents"
+ description: <<END
+0-D. The GIF-encoded image.
+END
+ }
+ out_arg {
+ name: "image"
+ description: <<END
+4-D with shape `[num_frames, height, width, 3]`. RGB order
+END
+ }
+  summary: "Decode the frame(s) of a GIF-encoded image to a uint8 tensor."
+ description: <<END
+GIFs with frame or transparency compression are not supported; convert
+animated GIFs from compressed to uncompressed with:
+
+ convert $src.gif -coalesce $dst.gif
+
+This op also supports decoding JPEGs and PNGs, though it is cleaner to use
+`tf.image.decode_image`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_DecodeJSONExample.pbtxt b/tensorflow/core/api_def/base_api/api_def_DecodeJSONExample.pbtxt
new file mode 100644
index 0000000000..cdf1c5f37d
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_DecodeJSONExample.pbtxt
@@ -0,0 +1,26 @@
+op {
+ graph_op_name: "DecodeJSONExample"
+ in_arg {
+ name: "json_examples"
+ description: <<END
+Each string is a JSON object serialized according to the JSON
+mapping of the Example proto.
+END
+ }
+ out_arg {
+ name: "binary_examples"
+ description: <<END
+Each string is a binary Example protocol buffer corresponding
+to the respective element of `json_examples`.
+END
+ }
+ summary: "Convert JSON-encoded Example records to binary protocol buffer strings."
+ description: <<END
+This op translates a tensor containing Example records, encoded using
+the [standard JSON
+mapping](https://developers.google.com/protocol-buffers/docs/proto3#json),
+into a tensor containing the same records encoded as binary protocol
+buffers. The resulting tensor can then be fed to any of the other
+Example-parsing ops.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_DecodeJpeg.pbtxt b/tensorflow/core/api_def/base_api/api_def_DecodeJpeg.pbtxt
new file mode 100644
index 0000000000..b9521370d3
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_DecodeJpeg.pbtxt
@@ -0,0 +1,80 @@
+op {
+ graph_op_name: "DecodeJpeg"
+ in_arg {
+ name: "contents"
+ description: <<END
+0-D. The JPEG-encoded image.
+END
+ }
+ out_arg {
+ name: "image"
+ description: <<END
+3-D with shape `[height, width, channels]`.
+END
+ }
+ attr {
+ name: "channels"
+ description: <<END
+Number of color channels for the decoded image.
+END
+ }
+ attr {
+ name: "ratio"
+ description: <<END
+Downscaling ratio.
+END
+ }
+ attr {
+ name: "fancy_upscaling"
+ description: <<END
+If true, uses a slower but nicer upscaling of the
+chroma planes (yuv420/422 only).
+END
+ }
+ attr {
+ name: "try_recover_truncated"
+ description: <<END
+If true, tries to recover an image from truncated input.
+END
+ }
+ attr {
+ name: "acceptable_fraction"
+ description: <<END
+The minimum required fraction of lines before a truncated
+input is accepted.
+END
+ }
+ attr {
+ name: "dct_method"
+ description: <<END
+String specifying a hint about the algorithm to use for decompression.
+Defaults to "", which maps to a system-specific default. Currently valid
+values are ["INTEGER_FAST", "INTEGER_ACCURATE"]. The hint may be ignored
+(e.g., if the internal jpeg library is changed to a version that does not
+have that specific option).
+END
+ }
+ summary: "Decode a JPEG-encoded image to a uint8 tensor."
+ description: <<END
+The attr `channels` indicates the desired number of color channels for the
+decoded image.
+
+Accepted values are:
+
+* 0: Use the number of channels in the JPEG-encoded image.
+* 1: output a grayscale image.
+* 3: output an RGB image.
+
+If needed, the JPEG-encoded image is transformed to match the requested number
+of color channels.
+
+The attr `ratio` allows downscaling the image by an integer factor during
+decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than
+downscaling the image later.
+
+
+This op also supports decoding PNGs and non-animated GIFs since the interface is
+the same, though it is cleaner to use `tf.image.decode_image`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_DecodePng.pbtxt b/tensorflow/core/api_def/base_api/api_def_DecodePng.pbtxt
new file mode 100644
index 0000000000..63404db800
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_DecodePng.pbtxt
@@ -0,0 +1,39 @@
+op {
+ graph_op_name: "DecodePng"
+ in_arg {
+ name: "contents"
+ description: <<END
+0-D. The PNG-encoded image.
+END
+ }
+ out_arg {
+ name: "image"
+ description: <<END
+3-D with shape `[height, width, channels]`.
+END
+ }
+ attr {
+ name: "channels"
+ description: <<END
+Number of color channels for the decoded image.
+END
+ }
+ summary: "Decode a PNG-encoded image to a uint8 or uint16 tensor."
+ description: <<END
+The attr `channels` indicates the desired number of color channels for the
+decoded image.
+
+Accepted values are:
+
+* 0: Use the number of channels in the PNG-encoded image.
+* 1: output a grayscale image.
+* 3: output an RGB image.
+* 4: output an RGBA image.
+
+If needed, the PNG-encoded image is transformed to match the requested number
+of color channels.
+
+This op also supports decoding JPEGs and non-animated GIFs since the interface
+is the same, though it is cleaner to use `tf.image.decode_image`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_DecodeRaw.pbtxt b/tensorflow/core/api_def/base_api/api_def_DecodeRaw.pbtxt
new file mode 100644
index 0000000000..27ca061013
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_DecodeRaw.pbtxt
@@ -0,0 +1,26 @@
+op {
+ graph_op_name: "DecodeRaw"
+ in_arg {
+ name: "bytes"
+ description: <<END
+All the elements must have the same length.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+A Tensor with one more dimension than the input `bytes`. The
+added dimension will have size equal to the length of the elements
+of `bytes` divided by the number of bytes to represent `out_type`.
+END
+ }
+ attr {
+ name: "little_endian"
+ description: <<END
+Whether the input `bytes` are in little-endian order.
+Ignored for `out_type` values that are stored in a single byte like
+`uint8`.
+END
+ }
+ summary: "Reinterpret the bytes of a string as a vector of numbers."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_DecodeWav.pbtxt b/tensorflow/core/api_def/base_api/api_def_DecodeWav.pbtxt
new file mode 100644
index 0000000000..9f055e73d3
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_DecodeWav.pbtxt
@@ -0,0 +1,50 @@
+op {
+ graph_op_name: "DecodeWav"
+ in_arg {
+ name: "contents"
+ description: <<END
+The WAV-encoded audio, usually from a file.
+END
+ }
+ out_arg {
+ name: "audio"
+ description: <<END
+2-D with shape `[length, channels]`.
+END
+ }
+ out_arg {
+ name: "sample_rate"
+ description: <<END
+Scalar holding the sample rate found in the WAV header.
+END
+ }
+ attr {
+ name: "desired_channels"
+ description: <<END
+Number of sample channels wanted.
+END
+ }
+ attr {
+ name: "desired_samples"
+ description: <<END
+Length of audio requested.
+END
+ }
+ summary: "Decode a 16-bit PCM WAV file to a float tensor."
+ description: <<END
+The -32768 to 32767 signed 16-bit values will be scaled to -1.0 to 1.0 in float.
+
+When desired_channels is set, if the input contains fewer channels than this
+then the last channel will be duplicated to give the requested number, else if
+the input has more channels than requested then the additional channels will be
+ignored.
+
+If desired_samples is set, then the audio will be cropped or padded with zeroes
+to the requested length.
+
+The first output contains a Tensor with the content of the audio samples. The
+first dimension indexes the samples and the last indexes the channels. For
+example, a ten-sample-long stereo WAV file gives an output shape of [10, 2].
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_DeleteSessionTensor.pbtxt b/tensorflow/core/api_def/base_api/api_def_DeleteSessionTensor.pbtxt
new file mode 100644
index 0000000000..16aaa7a802
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_DeleteSessionTensor.pbtxt
@@ -0,0 +1,10 @@
+op {
+ graph_op_name: "DeleteSessionTensor"
+ in_arg {
+ name: "handle"
+ description: <<END
+The handle for a tensor stored in the session state.
+END
+ }
+ summary: "Delete the tensor specified by its handle in the session."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_DenseToDenseSetOperation.pbtxt b/tensorflow/core/api_def/base_api/api_def_DenseToDenseSetOperation.pbtxt
new file mode 100644
index 0000000000..b8a469de95
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_DenseToDenseSetOperation.pbtxt
@@ -0,0 +1,47 @@
+op {
+ graph_op_name: "DenseToDenseSetOperation"
+ in_arg {
+ name: "set1"
+ description: <<END
+`Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`.
+Dimension `n` contains values in a set, duplicates are allowed but ignored.
+END
+ }
+ in_arg {
+ name: "set2"
+ description: <<END
+`Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set1`.
+Dimension `n` contains values in a set, duplicates are allowed but ignored.
+END
+ }
+ out_arg {
+ name: "result_indices"
+ description: <<END
+2D indices of a `SparseTensor`.
+END
+ }
+ out_arg {
+ name: "result_values"
+ description: <<END
+1D values of a `SparseTensor`.
+END
+ }
+ out_arg {
+ name: "result_shape"
+ description: <<END
+1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is
+the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`
+is the max result set size across all `0...n-1` dimensions.
+END
+ }
+ summary: "Applies set operation along last dimension of 2 `Tensor` inputs."
+ description: <<END
+See SetOperationOp::SetOperationFromContext for values of `set_operation`.
+
+Output `result` is a `SparseTensor` represented by `result_indices`,
+`result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this
+has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`
+dimension contains the result of `set_operation` applied to the corresponding
+`[0...n-1]` dimension of `set`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_DenseToSparseBatchDataset.pbtxt b/tensorflow/core/api_def/base_api/api_def_DenseToSparseBatchDataset.pbtxt
new file mode 100644
index 0000000000..f2f5594c7c
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_DenseToSparseBatchDataset.pbtxt
@@ -0,0 +1,25 @@
+op {
+ graph_op_name: "DenseToSparseBatchDataset"
+ in_arg {
+ name: "input_dataset"
+ description: <<END
+A handle to an input dataset. Must have a single component.
+END
+ }
+ in_arg {
+ name: "batch_size"
+ description: <<END
+A scalar representing the number of elements to accumulate in a
+batch.
+END
+ }
+ in_arg {
+ name: "row_shape"
+ description: <<END
+A vector representing the dense shape of each row in the produced
+SparseTensor. The shape may be partially specified, using `-1` to indicate
+that a particular dimension should use the maximum size of all batch elements.
+END
+ }
+ summary: "Creates a dataset that yields a SparseTensor for each element of the input."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_DenseToSparseSetOperation.pbtxt b/tensorflow/core/api_def/base_api/api_def_DenseToSparseSetOperation.pbtxt
new file mode 100644
index 0000000000..a4b0866373
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_DenseToSparseSetOperation.pbtxt
@@ -0,0 +1,70 @@
+op {
+ graph_op_name: "DenseToSparseSetOperation"
+ in_arg {
+ name: "set1"
+ description: <<END
+`Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`.
+Dimension `n` contains values in a set, duplicates are allowed but ignored.
+END
+ }
+ in_arg {
+ name: "set2_indices"
+ description: <<END
+2D `Tensor`, indices of a `SparseTensor`. Must be in row-major
+order.
+END
+ }
+ in_arg {
+ name: "set2_values"
+ description: <<END
+1D `Tensor`, values of a `SparseTensor`. Must be in row-major
+order.
+END
+ }
+ in_arg {
+ name: "set2_shape"
+ description: <<END
+1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must
+be the same as the 1st `n-1` dimensions of `set1`; `set2_shape[n]` is the
+max set size across the first `n-1` dimensions.
+END
+ }
+ out_arg {
+ name: "result_indices"
+ description: <<END
+2D indices of a `SparseTensor`.
+END
+ }
+ out_arg {
+ name: "result_values"
+ description: <<END
+1D values of a `SparseTensor`.
+END
+ }
+ out_arg {
+ name: "result_shape"
+ description: <<END
+1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is
+the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`
+is the max result set size across all `0...n-1` dimensions.
+END
+ }
+ summary: "Applies set operation along last dimension of `Tensor` and `SparseTensor`."
+ description: <<END
+See SetOperationOp::SetOperationFromContext for values of `set_operation`.
+
+Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`,
+and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same
+as `set1`. Dimension `n` contains values in a set, duplicates are allowed but
+ignored.
+
+If `validate_indices` is `True`, this op validates the order and range of `set2`
+indices.
+
+Output `result` is a `SparseTensor` represented by `result_indices`,
+`result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this
+has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`
+dimension contains the result of `set_operation` applied to the corresponding
+`[0...n-1]` dimension of `set`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_DepthToSpace.pbtxt b/tensorflow/core/api_def/base_api/api_def_DepthToSpace.pbtxt
new file mode 100644
index 0000000000..e7a18cd6b4
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_DepthToSpace.pbtxt
@@ -0,0 +1,101 @@
+op {
+ graph_op_name: "DepthToSpace"
+ attr {
+ name: "block_size"
+ description: <<END
+The size of the spatial block, same as in Space2Depth.
+END
+ }
+ summary: "DepthToSpace for tensors of type T."
+ description: <<END
+Rearranges data from depth into blocks of spatial data.
+This is the reverse transformation of SpaceToDepth. More specifically,
+this op outputs a copy of the input tensor where values from the `depth`
+dimension are moved in spatial blocks to the `height` and `width` dimensions.
+The attr `block_size` indicates the input block size and how the data is moved.
+
+ * Chunks of data of size `block_size * block_size` from depth are rearranged
+ into non-overlapping blocks of size `block_size x block_size`
+  * The width of the output tensor is `input_width * block_size`, whereas the
+ height is `input_height * block_size`.
+ * The Y, X coordinates within each block of the output image are determined
+ by the high order component of the input channel index.
+ * The depth of the input tensor must be divisible by
+ `block_size * block_size`.
+
+The `data_format` attr specifies the layout of the input and output tensors
+with the following options:
+ "NHWC": `[ batch, height, width, channels ]`
+ "NCHW": `[ batch, channels, height, width ]`
+ "NCHW_VECT_C":
+ `qint8 [ batch, channels / 4, height, width, channels % 4 ]`
+
+It is useful to consider the operation as transforming a 6-D Tensor.
+e.g. for data_format = NHWC,
+ Each element in the input tensor can be specified via 6 coordinates,
+ ordered by decreasing memory layout significance as:
+ n,iY,iX,bY,bX,oC (where n=batch index, iX, iY means X or Y coordinates
+ within the input image, bX, bY means coordinates
+ within the output block, oC means output channels).
+ The output would be the input transposed to the following layout:
+ n,iY,bY,iX,bX,oC
+
+This operation is useful for resizing the activations between convolutions
+(but keeping all data), e.g. instead of pooling. It is also useful for training
+purely convolutional models.
+
+For example, given an input of shape `[1, 1, 1, 4]`, data_format = "NHWC" and
+block_size = 2:
+
+```
+x = [[[[1, 2, 3, 4]]]]
+
+```
+
+This operation will output a tensor of shape `[1, 2, 2, 1]`:
+
+```
+ [[[[1], [2]],
+ [[3], [4]]]]
+```
+
+Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`,
+the corresponding output will have 2x2 elements and will have a depth of
+1 channel (1 = `4 / (block_size * block_size)`).
+The output element shape is `[2, 2, 1]`.
+
+For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g.
+
+```
+x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
+```
+
+This operation, for block size of 2, will return the following tensor of shape
+`[1, 2, 2, 3]`
+
+```
+ [[[[1, 2, 3], [4, 5, 6]],
+ [[7, 8, 9], [10, 11, 12]]]]
+
+```
+
+Similarly, for the following input of shape `[1, 2, 2, 4]`, and a block size of 2:
+
+```
+x = [[[[1, 2, 3, 4],
+ [5, 6, 7, 8]],
+ [[9, 10, 11, 12],
+ [13, 14, 15, 16]]]]
+```
+
+the operator will return the following tensor of shape `[1, 4, 4, 1]`:
+
+```
+x = [[[ [1], [2], [5], [6]],
+ [ [3], [4], [7], [8]],
+ [ [9], [10], [13], [14]],
+ [ [11], [12], [15], [16]]]]
+
+```
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_DepthwiseConv2dNative.pbtxt b/tensorflow/core/api_def/base_api/api_def_DepthwiseConv2dNative.pbtxt
new file mode 100644
index 0000000000..cc10ebe923
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_DepthwiseConv2dNative.pbtxt
@@ -0,0 +1,47 @@
+op {
+ graph_op_name: "DepthwiseConv2dNative"
+ attr {
+ name: "strides"
+ description: <<END
+1-D of length 4. The stride of the sliding window for each dimension
+of `input`.
+END
+ }
+ attr {
+ name: "padding"
+ description: <<END
+The type of padding algorithm to use.
+END
+ }
+ attr {
+ name: "data_format"
+ description: <<END
+Specify the data format of the input and output data. With the
+default format "NHWC", the data is stored in the order of:
+ [batch, height, width, channels].
+Alternatively, the format could be "NCHW", the data storage order of:
+ [batch, channels, height, width].
+END
+ }
+ summary: "Computes a 2-D depthwise convolution given 4-D `input` and `filter` tensors."
+ description: <<END
+Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
+and a filter / kernel tensor of shape
+`[filter_height, filter_width, in_channels, channel_multiplier]`, containing
+`in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies
+a different filter to each input channel (expanding from 1 channel to
+`channel_multiplier` channels for each), then concatenates the results
+together. Thus, the output has `in_channels * channel_multiplier` channels.
+
+```
+for k in 0..in_channels-1
+ for q in 0..channel_multiplier-1
+ output[b, i, j, k * channel_multiplier + q] =
+ sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *
+ filter[di, dj, k, q]
+```
+
+Must have `strides[0] = strides[3] = 1`. For the most common case of the same
+horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_DepthwiseConv2dNativeBackpropFilter.pbtxt b/tensorflow/core/api_def/base_api/api_def_DepthwiseConv2dNativeBackpropFilter.pbtxt
new file mode 100644
index 0000000000..9126be2afa
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_DepthwiseConv2dNativeBackpropFilter.pbtxt
@@ -0,0 +1,60 @@
+op {
+ graph_op_name: "DepthwiseConv2dNativeBackpropFilter"
+ in_arg {
+ name: "input"
+ description: <<END
+4-D with shape based on `data_format`. For example, if
+`data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height,
+in_width, in_channels]` tensor.
+END
+ }
+ in_arg {
+ name: "filter_sizes"
+ description: <<END
+An integer vector representing the tensor shape of `filter`,
+where `filter` is a 4-D
+`[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor.
+END
+ }
+ in_arg {
+ name: "out_backprop"
+ description: <<END
+4-D with shape based on `data_format`.
+For example, if `data_format` is 'NHWC' then
+out_backprop shape is `[batch, out_height, out_width, out_channels]`.
+Gradients w.r.t. the output of the convolution.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+4-D with shape
+`[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t.
+the `filter` input of the convolution.
+END
+ }
+ attr {
+ name: "strides"
+ description: <<END
+The stride of the sliding window for each dimension of the input
+of the convolution.
+END
+ }
+ attr {
+ name: "padding"
+ description: <<END
+The type of padding algorithm to use.
+END
+ }
+ attr {
+ name: "data_format"
+ description: <<END
+Specify the data format of the input and output data. With the
+default format "NHWC", the data is stored in the order of:
+ [batch, height, width, channels].
+Alternatively, the format could be "NCHW", the data storage order of:
+ [batch, channels, height, width].
+END
+ }
+ summary: "Computes the gradients of depthwise convolution with respect to the filter."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_DepthwiseConv2dNativeBackpropInput.pbtxt b/tensorflow/core/api_def/base_api/api_def_DepthwiseConv2dNativeBackpropInput.pbtxt
new file mode 100644
index 0000000000..f1d16858db
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_DepthwiseConv2dNativeBackpropInput.pbtxt
@@ -0,0 +1,60 @@
+op {
+ graph_op_name: "DepthwiseConv2dNativeBackpropInput"
+ in_arg {
+ name: "input_sizes"
+ description: <<END
+An integer vector representing the shape of `input`, based
+on `data_format`. For example, if `data_format` is 'NHWC' then
+ `input` is a 4-D `[batch, height, width, channels]` tensor.
+END
+ }
+ in_arg {
+ name: "filter"
+ description: <<END
+4-D with shape
+`[filter_height, filter_width, in_channels, depthwise_multiplier]`.
+END
+ }
+ in_arg {
+ name: "out_backprop"
+ description: <<END
+4-D with shape based on `data_format`.
+For example, if `data_format` is 'NHWC' then
+out_backprop shape is `[batch, out_height, out_width, out_channels]`.
+Gradients w.r.t. the output of the convolution.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+4-D with shape according to `data_format`. For example, if
+`data_format` is 'NHWC', output shape is `[batch, in_height,
+in_width, in_channels]`. Gradient w.r.t. the input of the
+convolution.
+END
+ }
+ attr {
+ name: "strides"
+ description: <<END
+The stride of the sliding window for each dimension of the input
+of the convolution.
+END
+ }
+ attr {
+ name: "padding"
+ description: <<END
+The type of padding algorithm to use.
+END
+ }
+ attr {
+ name: "data_format"
+ description: <<END
+Specify the data format of the input and output data. With the
+default format "NHWC", the data is stored in the order of:
+ [batch, height, width, channels].
+Alternatively, the format could be "NCHW", the data storage order of:
+ [batch, channels, height, width].
+END
+ }
+ summary: "Computes the gradients of depthwise convolution with respect to the input."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Dequantize.pbtxt b/tensorflow/core/api_def/base_api/api_def_Dequantize.pbtxt
new file mode 100644
index 0000000000..40c00ef58f
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Dequantize.pbtxt
@@ -0,0 +1,91 @@
+op {
+ graph_op_name: "Dequantize"
+ in_arg {
+ name: "min_range"
+ description: <<END
+The minimum scalar value possibly produced for the input.
+END
+ }
+ in_arg {
+ name: "max_range"
+ description: <<END
+The maximum scalar value possibly produced for the input.
+END
+ }
+ summary: "Dequantize the \'input\' tensor into a float Tensor."
+ description: <<END
+[min_range, max_range] are scalar floats that specify the range for
+the 'input' data. The 'mode' attribute controls exactly which calculations are
+used to convert the float values to their quantized equivalents.
+
+In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:
+
+```
+if T == qint8, in[i] += (range(T) + 1)/ 2.0
+out[i] = min_range + (in[i]* (max_range - min_range) / range(T))
+```
+here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`
+
+*MIN_COMBINED Mode Example*
+
+If the input comes from a QuantizedRelu6, the output type is
+quint8 (range of 0-255) but the possible range of QuantizedRelu6 is
+0-6. The min_range and max_range values are therefore 0.0 and 6.0.
+Dequantize on quint8 will take each value, cast to float, and multiply
+by 6 / 255.
+Note that if the quantized type is qint8, the operation will additionally add
+128 to each value before casting.
+
+If the mode is 'MIN_FIRST', then this approach is used:
+
+```c++
+num_discrete_values = 1 << (# of bits in T)
+range_adjust = num_discrete_values / (num_discrete_values - 1)
+range = (range_max - range_min) * range_adjust
+range_scale = range / num_discrete_values
+const double offset_input = static_cast<double>(input) - lowest_quantized;
+result = range_min + ((input - numeric_limits<T>::min()) * range_scale)
+```
+
+*SCALED mode Example*
+
+`SCALED` mode matches the quantization approach used in
+`QuantizeAndDequantize{V2|V3}`.
+
+If the mode is `SCALED`, we do not use the full range of the output type,
+choosing to elide the lowest possible value for symmetry (e.g., output range is
+-127 to 127, not -128 to 127 for signed 8 bit quantization), so that 0.0 maps to
+0.
+
+We first find the range of values in our tensor. The
+range we use is always centered on 0, so we find m such that
+```c++
+ m = max(abs(input_min), abs(input_max))
+```
+
+Our input tensor range is then `[-m, m]`.
+
+Next, we choose our fixed-point quantization buckets, `[min_fixed, max_fixed]`.
+If T is signed, this is
+```
+ num_bits = sizeof(T) * 8
+ [min_fixed, max_fixed] =
+ [-(1 << (num_bits - 1) - 1), (1 << (num_bits - 1)) - 1]
+```
+
+Otherwise, if T is unsigned, the fixed-point range is
+```
+ [min_fixed, max_fixed] = [0, (1 << num_bits) - 1]
+```
+
+From this we compute our scaling factor, s:
+```c++
+ s = (2 * m) / (max_fixed - min_fixed)
+```
+
+Now we can dequantize the elements of our tensor:
+```c++
+result = input * s
+```
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_DeserializeIterator.pbtxt b/tensorflow/core/api_def/base_api/api_def_DeserializeIterator.pbtxt
new file mode 100644
index 0000000000..653f6789db
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_DeserializeIterator.pbtxt
@@ -0,0 +1,17 @@
+op {
+ graph_op_name: "DeserializeIterator"
+ in_arg {
+ name: "resource_handle"
+ description: <<END
+A handle to an iterator resource.
+END
+ }
+ in_arg {
+ name: "serialized"
+ description: <<END
+A variant tensor storing the state of the iterator contained in the
+resource.
+END
+ }
+ summary: "Converts the given variant tensor to an iterator and stores it in the given resource."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_DeserializeManySparse.pbtxt b/tensorflow/core/api_def/base_api/api_def_DeserializeManySparse.pbtxt
new file mode 100644
index 0000000000..b1fb5eae02
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_DeserializeManySparse.pbtxt
@@ -0,0 +1,60 @@
+op {
+ graph_op_name: "DeserializeManySparse"
+ in_arg {
+ name: "serialized_sparse"
+ description: <<END
+2-D, The `N` serialized `SparseTensor` objects.
+Must have 3 columns.
+END
+ }
+ attr {
+ name: "dtype"
+ description: <<END
+The `dtype` of the serialized `SparseTensor` objects.
+END
+ }
+ summary: "Deserialize and concatenate `SparseTensors` from a serialized minibatch."
+ description: <<END
+The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where
+`N` is the minibatch size and the rows correspond to packed outputs of
+`SerializeSparse`. The ranks of the original `SparseTensor` objects
+must all match. When the final `SparseTensor` is created, it has rank one
+higher than the ranks of the incoming `SparseTensor` objects
+(they have been concatenated along a new row dimension).
+
+The output `SparseTensor` object's shape values for all dimensions but the
+first are the max across the input `SparseTensor` objects' shape values
+for the corresponding dimensions. Its first shape value is `N`, the minibatch
+size.
+
+The input `SparseTensor` objects' indices are assumed to be ordered in
+standard lexicographic order. If this is not the case, run `SparseReorder`
+after this step to restore index ordering.
+
+For example, if the serialized input is a `[2 x 3]` matrix representing two
+original `SparseTensor` objects:
+
+ index = [ 0]
+ [10]
+ [20]
+ values = [1, 2, 3]
+ shape = [50]
+
+and
+
+ index = [ 2]
+ [10]
+ values = [4, 5]
+ shape = [30]
+
+then the final deserialized `SparseTensor` will be:
+
+ index = [0 0]
+ [0 10]
+ [0 20]
+ [1 2]
+ [1 10]
+ values = [1, 2, 3, 4, 5]
+ shape = [2 50]
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_DestroyResourceOp.pbtxt b/tensorflow/core/api_def/base_api/api_def_DestroyResourceOp.pbtxt
new file mode 100644
index 0000000000..910d25ec82
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_DestroyResourceOp.pbtxt
@@ -0,0 +1,21 @@
+op {
+ graph_op_name: "DestroyResourceOp"
+ in_arg {
+ name: "resource"
+ description: <<END
+handle to the resource to delete.
+END
+ }
+ attr {
+ name: "ignore_lookup_error"
+ description: <<END
+whether to ignore the error when the resource
+doesn't exist.
+END
+ }
+ summary: "Deletes the resource specified by the handle."
+ description: <<END
+All subsequent operations using the resource will result in a NotFound
+error status.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_DestroyTemporaryVariable.pbtxt b/tensorflow/core/api_def/base_api/api_def_DestroyTemporaryVariable.pbtxt
new file mode 100644
index 0000000000..2ae9a30cb4
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_DestroyTemporaryVariable.pbtxt
@@ -0,0 +1,26 @@
+op {
+ graph_op_name: "DestroyTemporaryVariable"
+ in_arg {
+ name: "ref"
+ description: <<END
+A reference to the temporary variable tensor.
+END
+ }
+ attr {
+ name: "var_name"
+ description: <<END
+Name of the temporary variable, usually the name of the matching
+'TemporaryVariable' op.
+END
+ }
+ summary: "Destroys the temporary variable and returns its final value."
+ description: <<END
+Sets output to the value of the Tensor pointed to by 'ref', then destroys
+the temporary variable called 'var_name'.
+All other uses of 'ref' *must* have executed before this op.
+This is typically achieved by chaining the ref through each assign op, or by
+using control dependencies.
+
+Outputs the final value of the tensor pointed to by 'ref'.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Diag.pbtxt b/tensorflow/core/api_def/base_api/api_def_Diag.pbtxt
new file mode 100644
index 0000000000..e69d9077f9
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Diag.pbtxt
@@ -0,0 +1,29 @@
+op {
+ graph_op_name: "Diag"
+ in_arg {
+ name: "diagonal"
+ description: <<END
+Rank k tensor where k is at most 1.
+END
+ }
+  summary: "Returns a diagonal tensor with given diagonal values."
+ description: <<END
+Given a `diagonal`, this operation returns a tensor with the `diagonal` and
+everything else padded with zeros. The diagonal is computed as follows:
+
+Assume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of
+rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where:
+
+`output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else.
+
+For example:
+
+```
+# 'diagonal' is [1, 2, 3, 4]
+tf.diag(diagonal) ==> [[1, 0, 0, 0]
+ [0, 2, 0, 0]
+ [0, 0, 3, 0]
+ [0, 0, 0, 4]]
+```
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_DiagPart.pbtxt b/tensorflow/core/api_def/base_api/api_def_DiagPart.pbtxt
new file mode 100644
index 0000000000..1af7df95b7
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_DiagPart.pbtxt
@@ -0,0 +1,36 @@
+op {
+ graph_op_name: "DiagPart"
+ in_arg {
+ name: "input"
+ description: <<END
+Rank k tensor where k is even and not zero.
+END
+ }
+ out_arg {
+ name: "diagonal"
+ description: <<END
+The extracted diagonal.
+END
+ }
+ summary: "Returns the diagonal part of the tensor."
+ description: <<END
+This operation returns a tensor with the `diagonal` part
+of the `input`. The `diagonal` part is computed as follows:
+
+Assume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a
+tensor of rank `k` with dimensions `[D1,..., Dk]` where:
+
+`diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`.
+
+For example:
+
+```
+# 'input' is [[1, 0, 0, 0]
+ [0, 2, 0, 0]
+ [0, 0, 3, 0]
+ [0, 0, 0, 4]]
+
+tf.diag_part(input) ==> [1, 2, 3, 4]
+```
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Digamma.pbtxt b/tensorflow/core/api_def/base_api/api_def_Digamma.pbtxt
new file mode 100644
index 0000000000..0a8280701b
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Digamma.pbtxt
@@ -0,0 +1,7 @@
+op {
+ graph_op_name: "Digamma"
+ summary: "Computes Psi, the derivative of Lgamma (the log of the absolute value of"
+ description: <<END
+`Gamma(x)`), element-wise.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Dilation2D.pbtxt b/tensorflow/core/api_def/base_api/api_def_Dilation2D.pbtxt
new file mode 100644
index 0000000000..b38f5aa4f9
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Dilation2D.pbtxt
@@ -0,0 +1,67 @@
+op {
+ graph_op_name: "Dilation2D"
+ in_arg {
+ name: "input"
+ description: <<END
+4-D with shape `[batch, in_height, in_width, depth]`.
+END
+ }
+ in_arg {
+ name: "filter"
+ description: <<END
+3-D with shape `[filter_height, filter_width, depth]`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+4-D with shape `[batch, out_height, out_width, depth]`.
+END
+ }
+ attr {
+ name: "strides"
+ description: <<END
+The stride of the sliding window for each dimension of the input
+tensor. Must be: `[1, stride_height, stride_width, 1]`.
+END
+ }
+ attr {
+ name: "rates"
+ description: <<END
+The input stride for atrous morphological dilation. Must be:
+`[1, rate_height, rate_width, 1]`.
+END
+ }
+ attr {
+ name: "padding"
+ description: <<END
+The type of padding algorithm to use.
+END
+ }
+ summary: "Computes the grayscale dilation of 4-D `input` and 3-D `filter` tensors."
+ description: <<END
+The `input` tensor has shape `[batch, in_height, in_width, depth]` and the
+`filter` tensor has shape `[filter_height, filter_width, depth]`, i.e., each
+input channel is processed independently of the others with its own structuring
+function. The `output` tensor has shape
+`[batch, out_height, out_width, depth]`. The spatial dimensions of the output
+tensor depend on the `padding` algorithm. We currently only support the default
+"NHWC" `data_format`.
+
+In detail, the grayscale morphological 2-D dilation is the max-sum correlation
+(for consistency with `conv2d`, we use unmirrored filters):
+
+ output[b, y, x, c] =
+ max_{dy, dx} input[b,
+ strides[1] * y + rates[1] * dy,
+ strides[2] * x + rates[2] * dx,
+ c] +
+ filter[dy, dx, c]
+
+Max-pooling is a special case when the filter has size equal to the pooling
+kernel size and contains all zeros.
+
+Note on duality: The dilation of `input` by the `filter` is equal to the
+negation of the erosion of `-input` by the reflected `filter`.
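+
+As an illustrative sketch (assuming the Python `tf.nn.dilation2d` wrapper and
+hypothetical shapes):
+
+```python
+# A single 4x4 one-channel image and an all-zero 2x2 structuring element, so
+# the dilation reduces to a 2x2 max-pool with unit strides and VALID padding.
+image = tf.reshape(tf.range(16, dtype=tf.float32), [1, 4, 4, 1])
+kernel = tf.zeros([2, 2, 1])
+output = tf.nn.dilation2d(image, kernel, strides=[1, 1, 1, 1],
+                          rates=[1, 1, 1, 1], padding="VALID")
+# output has shape [1, 3, 3, 1]; each entry is the max of a 2x2 window.
+```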
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Dilation2DBackpropFilter.pbtxt b/tensorflow/core/api_def/base_api/api_def_Dilation2DBackpropFilter.pbtxt
new file mode 100644
index 0000000000..a58f3b48ed
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Dilation2DBackpropFilter.pbtxt
@@ -0,0 +1,48 @@
+op {
+ graph_op_name: "Dilation2DBackpropFilter"
+ in_arg {
+ name: "input"
+ description: <<END
+4-D with shape `[batch, in_height, in_width, depth]`.
+END
+ }
+ in_arg {
+ name: "filter"
+ description: <<END
+3-D with shape `[filter_height, filter_width, depth]`.
+END
+ }
+ in_arg {
+ name: "out_backprop"
+ description: <<END
+4-D with shape `[batch, out_height, out_width, depth]`.
+END
+ }
+ out_arg {
+ name: "filter_backprop"
+ description: <<END
+3-D with shape `[filter_height, filter_width, depth]`.
+END
+ }
+ attr {
+ name: "strides"
+ description: <<END
+1-D of length 4. The stride of the sliding window for each dimension of
+the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
+END
+ }
+ attr {
+ name: "rates"
+ description: <<END
+1-D of length 4. The input stride for atrous morphological dilation.
+Must be: `[1, rate_height, rate_width, 1]`.
+END
+ }
+ attr {
+ name: "padding"
+ description: <<END
+The type of padding algorithm to use.
+END
+ }
+ summary: "Computes the gradient of morphological 2-D dilation with respect to the filter."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Dilation2DBackpropInput.pbtxt b/tensorflow/core/api_def/base_api/api_def_Dilation2DBackpropInput.pbtxt
new file mode 100644
index 0000000000..0f966c1aae
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Dilation2DBackpropInput.pbtxt
@@ -0,0 +1,48 @@
+op {
+ graph_op_name: "Dilation2DBackpropInput"
+ in_arg {
+ name: "input"
+ description: <<END
+4-D with shape `[batch, in_height, in_width, depth]`.
+END
+ }
+ in_arg {
+ name: "filter"
+ description: <<END
+3-D with shape `[filter_height, filter_width, depth]`.
+END
+ }
+ in_arg {
+ name: "out_backprop"
+ description: <<END
+4-D with shape `[batch, out_height, out_width, depth]`.
+END
+ }
+ out_arg {
+ name: "in_backprop"
+ description: <<END
+4-D with shape `[batch, in_height, in_width, depth]`.
+END
+ }
+ attr {
+ name: "strides"
+ description: <<END
+1-D of length 4. The stride of the sliding window for each dimension of
+the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
+END
+ }
+ attr {
+ name: "rates"
+ description: <<END
+1-D of length 4. The input stride for atrous morphological dilation.
+Must be: `[1, rate_height, rate_width, 1]`.
+END
+ }
+ attr {
+ name: "padding"
+ description: <<END
+The type of padding algorithm to use.
+END
+ }
+ summary: "Computes the gradient of morphological 2-D dilation with respect to the input."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Div.pbtxt b/tensorflow/core/api_def/base_api/api_def_Div.pbtxt
new file mode 100644
index 0000000000..12b6fb5b4c
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Div.pbtxt
@@ -0,0 +1,8 @@
+op {
+ graph_op_name: "Div"
+ summary: "Returns x / y element-wise."
+ description: <<END
+*NOTE*: `Div` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
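+
+For example (an illustrative sketch, assuming the Python `tf.div` wrapper):
+
+```python
+x = tf.constant([16, 12, 11])
+y = tf.constant([4, 6, 2])
+tf.div(x, y)  # ==> [4, 2, 5]  (integer inputs divide like C integers)
+```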
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_DrawBoundingBoxes.pbtxt b/tensorflow/core/api_def/base_api/api_def_DrawBoundingBoxes.pbtxt
new file mode 100644
index 0000000000..6c3ae09f5d
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_DrawBoundingBoxes.pbtxt
@@ -0,0 +1,37 @@
+op {
+ graph_op_name: "DrawBoundingBoxes"
+ in_arg {
+ name: "images"
+ description: <<END
+4-D with shape `[batch, height, width, depth]`. A batch of images.
+END
+ }
+ in_arg {
+ name: "boxes"
+ description: <<END
+3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding
+boxes.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+4-D with the same shape as `images`. The batch of input images with
+bounding boxes drawn on the images.
+END
+ }
+ summary: "Draw bounding boxes on a batch of images."
+ description: <<END
+Outputs a copy of `images` but draws on top of the pixels zero or more bounding
+boxes specified by the locations in `boxes`. The coordinates of each
+bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. The
+bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
+height of the underlying image.
+
+For example, if an image is 100 x 200 pixels (height x width) and the bounding
+box is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of
+the bounding box will be `(40, 10)` to `(180, 50)` (in (x,y) coordinates).
+
+Parts of the bounding box may fall outside the image.
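+
+A minimal usage sketch (assuming the Python `tf.image.draw_bounding_boxes`
+wrapper):
+
+```python
+images = tf.zeros([1, 100, 200, 3])            # batch of one 100x200 image
+boxes = tf.constant([[[0.1, 0.2, 0.5, 0.9]]])  # [batch, num_boxes, 4]
+annotated = tf.image.draw_bounding_boxes(images, boxes)
+```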
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_DynamicPartition.pbtxt b/tensorflow/core/api_def/base_api/api_def_DynamicPartition.pbtxt
new file mode 100644
index 0000000000..b5c44b5e07
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_DynamicPartition.pbtxt
@@ -0,0 +1,55 @@
+op {
+ graph_op_name: "DynamicPartition"
+ in_arg {
+ name: "partitions"
+ description: <<END
+Any shape. Indices in the range `[0, num_partitions)`.
+END
+ }
+ attr {
+ name: "num_partitions"
+ description: <<END
+The number of partitions to output.
+END
+ }
+ summary: "Partitions `data` into `num_partitions` tensors using indices from `partitions`."
+ description: <<END
+For each index tuple `js` of size `partitions.ndim`, the slice `data[js, ...]`
+becomes part of `outputs[partitions[js]]`. The slices with `partitions[js] = i`
+are placed in `outputs[i]` in lexicographic order of `js`, and the first
+dimension of `outputs[i]` is the number of entries in `partitions` equal to `i`.
+In detail,
+
+```python
+ outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:]
+
+ outputs[i] = pack([data[js, ...] for js if partitions[js] == i])
+```
+
+`data.shape` must start with `partitions.shape`.
+
+For example:
+
+```python
+ # Scalar partitions.
+ partitions = 1
+ num_partitions = 2
+ data = [10, 20]
+ outputs[0] = [] # Empty with shape [0, 2]
+ outputs[1] = [[10, 20]]
+
+ # Vector partitions.
+ partitions = [0, 0, 1, 1, 0]
+ num_partitions = 2
+ data = [10, 20, 30, 40, 50]
+ outputs[0] = [10, 20, 50]
+ outputs[1] = [30, 40]
+```
+
+See `dynamic_stitch` for an example of how to merge partitions back.
+
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="https://www.tensorflow.org/images/DynamicPartition.png" alt>
+</div>
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_DynamicStitch.pbtxt b/tensorflow/core/api_def/base_api/api_def_DynamicStitch.pbtxt
new file mode 100644
index 0000000000..34bd77bc0e
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_DynamicStitch.pbtxt
@@ -0,0 +1,68 @@
+op {
+ graph_op_name: "DynamicStitch"
+ summary: "Interleave the values from the `data` tensors into a single tensor."
+ description: <<END
+Builds a merged tensor such that
+
+```python
+ merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
+```
+
+For example, if each `indices[m]` is scalar or vector, we have
+
+```python
+ # Scalar indices:
+ merged[indices[m], ...] = data[m][...]
+
+ # Vector indices:
+ merged[indices[m][i], ...] = data[m][i, ...]
+```
+
+Each `data[i].shape` must start with the corresponding `indices[i].shape`,
+and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we
+must have `data[i].shape = indices[i].shape + constant`. In terms of this
+`constant`, the output shape is
+
+    merged.shape = [max(indices) + 1] + constant
+
+Values are merged in order, so if an index appears in both `indices[m][i]` and
+`indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in the
+merged result. If you do not need this guarantee, ParallelDynamicStitch might
+perform better on some devices.
+
+For example:
+
+```python
+ indices[0] = 6
+ indices[1] = [4, 1]
+ indices[2] = [[5, 2], [0, 3]]
+ data[0] = [61, 62]
+ data[1] = [[41, 42], [11, 12]]
+ data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
+ merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
+ [51, 52], [61, 62]]
+```
+
+This method can be used to merge partitions created by `dynamic_partition`
+as illustrated in the following example:
+
+```python
+  # Apply a function (increment x_i) to elements for which a certain
+  # condition holds (x_i != -1 in this example).
+  x = tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
+  condition_mask = tf.not_equal(x, tf.constant(-1.))
+  partitioned_data = tf.dynamic_partition(
+      x, tf.cast(condition_mask, tf.int32), 2)
+  partitioned_data[1] = partitioned_data[1] + 1.0
+  condition_indices = tf.dynamic_partition(
+      tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32), 2)
+  x = tf.dynamic_stitch(condition_indices, partitioned_data)
+  # Here x = [1.1, -1., 6.2, 5.3, -1., 8.4]; the -1. values remain
+  # unchanged.
+```
+
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="https://www.tensorflow.org/images/DynamicStitch.png" alt>
+</div>
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_E.pbtxt b/tensorflow/core/api_def/base_api/api_def_E.pbtxt
deleted file mode 100644
index b49146f7c4..0000000000
--- a/tensorflow/core/api_def/base_api/api_def_E.pbtxt
+++ /dev/null
@@ -1,261 +0,0 @@
-op {
- graph_op_name: "EditDistance"
- endpoint {
- name: "EditDistance"
- }
- summary: "Computes the (possibly normalized) Levenshtein Edit Distance."
- description: <<END
-The inputs are variable-length sequences provided by SparseTensors
- (hypothesis_indices, hypothesis_values, hypothesis_shape)
-and
- (truth_indices, truth_values, truth_shape).
-
-The inputs are:
-END
-}
-op {
- graph_op_name: "Elu"
- endpoint {
- name: "Elu"
- }
- summary: "Computes exponential linear: `exp(features) - 1` if < 0, `features` otherwise."
- description: <<END
-See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)
-](http://arxiv.org/abs/1511.07289)
-END
-}
-op {
- graph_op_name: "EluGrad"
- endpoint {
- name: "EluGrad"
- }
- summary: "Computes gradients for the exponential linear (Elu) operation."
-}
-op {
- graph_op_name: "EncodeBase64"
- endpoint {
- name: "EncodeBase64"
- }
- summary: "Encode strings into web-safe base64 format."
- description: <<END
-Refer to the following article for more information on base64 format:
-en.wikipedia.org/wiki/Base64. Base64 strings may have padding with '=' at the
-end so that the encoded has length multiple of 4. See Padding section of the
-link above.
-
-Web-safe means that the encoder uses - and _ instead of + and /.
-END
-}
-op {
- graph_op_name: "EncodeJpeg"
- endpoint {
- name: "EncodeJpeg"
- }
- summary: "JPEG-encode an image."
- description: <<END
-`image` is a 3-D uint8 Tensor of shape `[height, width, channels]`.
-
-The attr `format` can be used to override the color format of the encoded
-output. Values can be:
-
-* `''`: Use a default format based on the number of channels in the image.
-* `grayscale`: Output a grayscale JPEG image. The `channels` dimension
- of `image` must be 1.
-* `rgb`: Output an RGB JPEG image. The `channels` dimension
- of `image` must be 3.
-
-If `format` is not specified or is the empty string, a default format is picked
-in function of the number of channels in `image`:
-
-* 1: Output a grayscale image.
-* 3: Output an RGB image.
-END
-}
-op {
- graph_op_name: "EncodePng"
- endpoint {
- name: "EncodePng"
- }
- summary: "PNG-encode an image."
- description: <<END
-`image` is a 3-D uint8 or uint16 Tensor of shape `[height, width, channels]`
-where `channels` is:
-
-* 1: for grayscale.
-* 2: for grayscale + alpha.
-* 3: for RGB.
-* 4: for RGBA.
-
-The ZLIB compression level, `compression`, can be -1 for the PNG-encoder
-default or a value from 0 to 9. 9 is the highest compression level, generating
-the smallest output, but is slower.
-END
-}
-op {
- graph_op_name: "EncodeWav"
- endpoint {
- name: "EncodeWav"
- }
- summary: "Encode audio data using the WAV file format."
- description: <<END
-This operation will generate a string suitable to be saved out to create a .wav
-audio file. It will be encoded in the 16-bit PCM format. It takes in float
-values in the range -1.0f to 1.0f, and any outside that value will be clamped to
-that range.
-
-`audio` is a 2-D float Tensor of shape `[length, channels]`.
-`sample_rate` is a scalar Tensor holding the rate to use (e.g. 44100).
-END
-}
-op {
- graph_op_name: "Enter"
- endpoint {
- name: "Enter"
- }
- summary: "Creates or finds a child frame, and makes `data` available to the child frame."
- description: <<END
-This op is used together with `Exit` to create loops in the graph.
-The unique `frame_name` is used by the `Executor` to identify frames. If
-`is_constant` is true, `output` is a constant in the child frame; otherwise
-it may be changed in the child frame. At most `parallel_iterations` iterations
-are run in parallel in the child frame.
-END
-}
-op {
- graph_op_name: "Equal"
- endpoint {
- name: "Equal"
- }
- summary: "Returns the truth value of (x == y) element-wise."
- description: <<END
-*NOTE*: `Equal` supports broadcasting. More about broadcasting
-[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-END
-}
-op {
- graph_op_name: "Erf"
- endpoint {
- name: "Erf"
- }
- summary: "Computes the Gauss error function of `x` element-wise."
-}
-op {
- graph_op_name: "Erfc"
- endpoint {
- name: "Erfc"
- }
- summary: "Computes the complementary error function of `x` element-wise."
-}
-op {
- graph_op_name: "Exit"
- endpoint {
- name: "Exit"
- }
- summary: "Exits the current frame to its parent frame."
- description: <<END
-Exit makes its input `data` available to the parent frame.
-END
-}
-op {
- graph_op_name: "Exp"
- endpoint {
- name: "Exp"
- }
- summary: "Computes exponential of x element-wise. \\\\(y = e^x\\\\)."
-}
-op {
- graph_op_name: "ExpandDims"
- endpoint {
- name: "ExpandDims"
- }
- summary: "Inserts a dimension of 1 into a tensor\'s shape."
- description: <<END
-Given a tensor `input`, this operation inserts a dimension of 1 at the
-dimension index `dim` of `input`'s shape. The dimension index `dim` starts at
-zero; if you specify a negative number for `dim` it is counted backward from
-the end.
-
-This operation is useful if you want to add a batch dimension to a single
-element. For example, if you have a single image of shape `[height, width,
-channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,
-which will make the shape `[1, height, width, channels]`.
-
-Other examples:
-
-```
-# 't' is a tensor of shape [2]
-shape(expand_dims(t, 0)) ==> [1, 2]
-shape(expand_dims(t, 1)) ==> [2, 1]
-shape(expand_dims(t, -1)) ==> [2, 1]
-
-# 't2' is a tensor of shape [2, 3, 5]
-shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]
-shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]
-shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]
-```
-
-This operation requires that:
-
-`-1-input.dims() <= dim <= input.dims()`
-
-This operation is related to `squeeze()`, which removes dimensions of
-size 1.
-END
-}
-op {
- graph_op_name: "Expm1"
- endpoint {
- name: "Expm1"
- }
- summary: "Computes exponential of x - 1 element-wise."
- description: <<END
-I.e., \\(y = (\exp x) - 1\\).
-END
-}
-op {
- graph_op_name: "ExtractGlimpse"
- endpoint {
- name: "ExtractGlimpse"
- }
- summary: "Extracts a glimpse from the input tensor."
- description: <<END
-Returns a set of windows called glimpses extracted at location
-`offsets` from the input tensor. If the windows only partially
-overlaps the inputs, the non overlapping areas will be filled with
-random noise.
-
-The result is a 4-D tensor of shape `[batch_size, glimpse_height,
-glimpse_width, channels]`. The channels and batch dimensions are the
-same as that of the input tensor. The height and width of the output
-windows are specified in the `size` parameter.
-
-The argument `normalized` and `centered` controls how the windows are built:
-
-* If the coordinates are normalized but not centered, 0.0 and 1.0
- correspond to the minimum and maximum of each height and width
- dimension.
-* If the coordinates are both normalized and centered, they range from
- -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper
- left corner, the lower right corner is located at (1.0, 1.0) and the
- center is at (0, 0).
-* If the coordinates are not normalized they are interpreted as
- numbers of pixels.
-END
-}
-op {
- graph_op_name: "ExtractImagePatches"
- endpoint {
- name: "ExtractImagePatches"
- }
- summary: "Extract `patches` from `images` and put them in the \"depth\" output dimension."
-}
-op {
- graph_op_name: "ExtractJpegShape"
- endpoint {
- name: "ExtractJpegShape"
- }
- summary: "Extract the shape information of a JPEG-encoded image."
- description: <<END
-This op only parses the image header, so it is much faster than DecodeJpeg.
-END
-}
diff --git a/tensorflow/core/api_def/base_api/api_def_EditDistance.pbtxt b/tensorflow/core/api_def/base_api/api_def_EditDistance.pbtxt
new file mode 100644
index 0000000000..678c451a8a
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_EditDistance.pbtxt
@@ -0,0 +1,96 @@
+op {
+ graph_op_name: "EditDistance"
+ in_arg {
+ name: "hypothesis_indices"
+ description: <<END
+The indices of the hypothesis list SparseTensor.
+This is an N x R int64 matrix.
+END
+ }
+ in_arg {
+ name: "hypothesis_values"
+ description: <<END
+The values of the hypothesis list SparseTensor.
+This is an N-length vector.
+END
+ }
+ in_arg {
+ name: "hypothesis_shape"
+ description: <<END
+The shape of the hypothesis list SparseTensor.
+This is an R-length vector.
+END
+ }
+ in_arg {
+ name: "truth_indices"
+ description: <<END
+The indices of the truth list SparseTensor.
+This is an M x R int64 matrix.
+END
+ }
+ in_arg {
+ name: "truth_values"
+ description: <<END
+The values of the truth list SparseTensor.
+This is an M-length vector.
+END
+ }
+ in_arg {
+ name: "truth_shape"
+ description: <<END
+The shape of the truth list SparseTensor.
+This is an R-length vector.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+A dense float tensor with rank R - 1.
+
+For the example input:
+
+ // hypothesis represents a 2x1 matrix with variable-length values:
+ // (0,0) = ["a"]
+ // (1,0) = ["b"]
+ hypothesis_indices = [[0, 0, 0],
+ [1, 0, 0]]
+ hypothesis_values = ["a", "b"]
+ hypothesis_shape = [2, 1, 1]
+
+ // truth represents a 2x2 matrix with variable-length values:
+ // (0,0) = []
+ // (0,1) = ["a"]
+ // (1,0) = ["b", "c"]
+ // (1,1) = ["a"]
+ truth_indices = [[0, 1, 0],
+ [1, 0, 0],
+ [1, 0, 1],
+ [1, 1, 0]]
+ truth_values = ["a", "b", "c", "a"]
+ truth_shape = [2, 2, 2]
+ normalize = true
+
+The output will be:
+
+ // output is a 2x2 matrix with edit distances normalized by truth lengths.
+ output = [[inf, 1.0], // (0,0): no truth, (0,1): no hypothesis
+ [0.5, 1.0]] // (1,0): addition, (1,1): no hypothesis
+END
+ }
+ attr {
+ name: "normalize"
+ description: <<END
+boolean (if true, edit distances are normalized by length of truth).
+END
+ }
+ summary: "Computes the (possibly normalized) Levenshtein Edit Distance."
+ description: <<END
+The inputs are variable-length sequences provided by SparseTensors
+  (hypothesis_indices, hypothesis_values, hypothesis_shape)
+and
+  (truth_indices, truth_values, truth_shape).
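+
+A minimal usage sketch (assuming the Python `tf.edit_distance` wrapper, which
+assembles these components from `tf.SparseTensor` inputs):
+
+```python
+hypothesis = tf.SparseTensor([[0, 0, 0]], ["a"], [1, 1, 1])
+truth = tf.SparseTensor([[0, 0, 0], [0, 0, 1]], ["b", "c"], [1, 1, 2])
+tf.edit_distance(hypothesis, truth, normalize=True)  # ==> [[1.0]]
+```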
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Elu.pbtxt b/tensorflow/core/api_def/base_api/api_def_Elu.pbtxt
new file mode 100644
index 0000000000..cf3d4b73d3
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Elu.pbtxt
@@ -0,0 +1,8 @@
+op {
+ graph_op_name: "Elu"
+ summary: "Computes exponential linear: `exp(features) - 1` if < 0, `features` otherwise."
+ description: <<END
+See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)
+](http://arxiv.org/abs/1511.07289)
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_EluGrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_EluGrad.pbtxt
new file mode 100644
index 0000000000..41aa5a2ac7
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_EluGrad.pbtxt
@@ -0,0 +1,24 @@
+op {
+ graph_op_name: "EluGrad"
+ visibility: HIDDEN
+ in_arg {
+ name: "gradients"
+ description: <<END
+The backpropagated gradients to the corresponding Elu operation.
+END
+ }
+ in_arg {
+ name: "outputs"
+ description: <<END
+The outputs of the corresponding Elu operation.
+END
+ }
+ out_arg {
+ name: "backprops"
+ description: <<END
+The gradients: `gradients * (outputs + 1)` if outputs < 0,
+`gradients` otherwise.
+END
+ }
+ summary: "Computes gradients for the exponential linear (Elu) operation."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_EncodeBase64.pbtxt b/tensorflow/core/api_def/base_api/api_def_EncodeBase64.pbtxt
new file mode 100644
index 0000000000..f25fe05cfd
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_EncodeBase64.pbtxt
@@ -0,0 +1,30 @@
+op {
+ graph_op_name: "EncodeBase64"
+ in_arg {
+ name: "input"
+ description: <<END
+Strings to be encoded.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Input strings encoded in base64.
+END
+ }
+ attr {
+ name: "pad"
+ description: <<END
+Bool whether padding ('=') is applied at the end of the encoded strings.
+END
+ }
+ summary: "Encode strings into web-safe base64 format."
+ description: <<END
+Refer to the following article for more information on base64 format:
+en.wikipedia.org/wiki/Base64. Base64 strings may have padding with '=' at the
+end so that the encoded string has a length that is a multiple of 4. See the
+Padding section of the link above.
+
+Web-safe means that the encoder uses - and _ instead of + and /.
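+
+For example (an illustrative sketch, assuming the Python `tf.encode_base64`
+wrapper):
+
+```python
+tf.encode_base64(tf.constant(["hello"]), pad=True)  # ==> ["aGVsbG8="]
+```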
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_EncodeJpeg.pbtxt b/tensorflow/core/api_def/base_api/api_def_EncodeJpeg.pbtxt
new file mode 100644
index 0000000000..05a46ed291
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_EncodeJpeg.pbtxt
@@ -0,0 +1,89 @@
+op {
+ graph_op_name: "EncodeJpeg"
+ in_arg {
+ name: "image"
+ description: <<END
+3-D with shape `[height, width, channels]`.
+END
+ }
+ out_arg {
+ name: "contents"
+ description: <<END
+0-D. JPEG-encoded image.
+END
+ }
+ attr {
+ name: "format"
+ description: <<END
+Per pixel image format.
+END
+ }
+ attr {
+ name: "quality"
+ description: <<END
+Quality of the compression from 0 to 100 (higher is better and slower).
+END
+ }
+ attr {
+ name: "progressive"
+ description: <<END
+If True, create a JPEG that loads progressively (coarse to fine).
+END
+ }
+ attr {
+ name: "optimize_size"
+ description: <<END
+If True, spend CPU/RAM to reduce size with no quality change.
+END
+ }
+ attr {
+ name: "chroma_downsampling"
+ description: <<END
+See http://en.wikipedia.org/wiki/Chroma_subsampling.
+END
+ }
+ attr {
+ name: "density_unit"
+ description: <<END
+Unit used to specify `x_density` and `y_density`:
+pixels per inch (`'in'`) or centimeter (`'cm'`).
+END
+ }
+ attr {
+ name: "x_density"
+ description: <<END
+Horizontal pixels per density unit.
+END
+ }
+ attr {
+ name: "y_density"
+ description: <<END
+Vertical pixels per density unit.
+END
+ }
+ attr {
+ name: "xmp_metadata"
+ description: <<END
+If not empty, embed this XMP metadata in the image header.
+END
+ }
+ summary: "JPEG-encode an image."
+ description: <<END
+`image` is a 3-D uint8 Tensor of shape `[height, width, channels]`.
+
+The attr `format` can be used to override the color format of the encoded
+output. Values can be:
+
+* `''`: Use a default format based on the number of channels in the image.
+* `grayscale`: Output a grayscale JPEG image. The `channels` dimension
+ of `image` must be 1.
+* `rgb`: Output an RGB JPEG image. The `channels` dimension
+ of `image` must be 3.
+
+If `format` is not specified or is the empty string, a default format is picked
+in function of the number of channels in `image`:
+
+* 1: Output a grayscale image.
+* 3: Output an RGB image.
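+
+A minimal usage sketch (assuming the Python `tf.image.encode_jpeg` wrapper):
+
+```python
+image = tf.zeros([64, 64, 3], dtype=tf.uint8)
+contents = tf.image.encode_jpeg(image, quality=90)  # 0-D string tensor
+```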
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_EncodePng.pbtxt b/tensorflow/core/api_def/base_api/api_def_EncodePng.pbtxt
new file mode 100644
index 0000000000..9c7d3b3733
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_EncodePng.pbtxt
@@ -0,0 +1,35 @@
+op {
+ graph_op_name: "EncodePng"
+ in_arg {
+ name: "image"
+ description: <<END
+3-D with shape `[height, width, channels]`.
+END
+ }
+ out_arg {
+ name: "contents"
+ description: <<END
+0-D. PNG-encoded image.
+END
+ }
+ attr {
+ name: "compression"
+ description: <<END
+Compression level.
+END
+ }
+ summary: "PNG-encode an image."
+ description: <<END
+`image` is a 3-D uint8 or uint16 Tensor of shape `[height, width, channels]`
+where `channels` is:
+
+* 1: for grayscale.
+* 2: for grayscale + alpha.
+* 3: for RGB.
+* 4: for RGBA.
+
+The ZLIB compression level, `compression`, can be -1 for the PNG-encoder
+default or a value from 0 to 9. 9 is the highest compression level, generating
+the smallest output, but is slower.
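+
+For example (an illustrative sketch, assuming the Python `tf.image.encode_png`
+wrapper):
+
+```python
+image = tf.zeros([64, 64, 4], dtype=tf.uint8)  # RGBA
+contents = tf.image.encode_png(image, compression=9)
+```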
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_EncodeWav.pbtxt b/tensorflow/core/api_def/base_api/api_def_EncodeWav.pbtxt
new file mode 100644
index 0000000000..54a8b1fa55
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_EncodeWav.pbtxt
@@ -0,0 +1,31 @@
+op {
+ graph_op_name: "EncodeWav"
+ in_arg {
+ name: "audio"
+ description: <<END
+2-D with shape `[length, channels]`.
+END
+ }
+ in_arg {
+ name: "sample_rate"
+ description: <<END
+Scalar containing the sample frequency.
+END
+ }
+ out_arg {
+ name: "contents"
+ description: <<END
+0-D. WAV-encoded file contents.
+END
+ }
+ summary: "Encode audio data using the WAV file format."
+ description: <<END
+This operation will generate a string suitable to be saved out to create a .wav
+audio file. It will be encoded in the 16-bit PCM format. It takes in float
+values in the range -1.0f to 1.0f; any values outside that range will be
+clamped to it.
+
+`audio` is a 2-D float Tensor of shape `[length, channels]`.
+`sample_rate` is a scalar Tensor holding the rate to use (e.g. 44100).
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Enter.pbtxt b/tensorflow/core/api_def/base_api/api_def_Enter.pbtxt
new file mode 100644
index 0000000000..dfff8e6ddb
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Enter.pbtxt
@@ -0,0 +1,42 @@
+op {
+ graph_op_name: "Enter"
+ visibility: HIDDEN
+ in_arg {
+ name: "data"
+ description: <<END
+The tensor to be made available to the child frame.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+The same tensor as `data`.
+END
+ }
+ attr {
+ name: "frame_name"
+ description: <<END
+The name of the child frame.
+END
+ }
+ attr {
+ name: "is_constant"
+ description: <<END
+If true, the output is constant within the child frame.
+END
+ }
+ attr {
+ name: "parallel_iterations"
+ description: <<END
+The number of iterations allowed to run in parallel.
+END
+ }
+ summary: "Creates or finds a child frame, and makes `data` available to the child frame."
+ description: <<END
+This op is used together with `Exit` to create loops in the graph.
+The unique `frame_name` is used by the `Executor` to identify frames. If
+`is_constant` is true, `output` is a constant in the child frame; otherwise
+it may be changed in the child frame. At most `parallel_iterations` iterations
+are run in parallel in the child frame.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Equal.pbtxt b/tensorflow/core/api_def/base_api/api_def_Equal.pbtxt
new file mode 100644
index 0000000000..6ca8ef9455
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Equal.pbtxt
@@ -0,0 +1,8 @@
+op {
+ graph_op_name: "Equal"
+ summary: "Returns the truth value of (x == y) element-wise."
+ description: <<END
+*NOTE*: `Equal` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
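+
+For example (an illustrative sketch, assuming the Python `tf.equal` wrapper):
+
+```python
+tf.equal(tf.constant([1, 2, 3]), tf.constant(2))  # ==> [False, True, False]
+```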
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Erf.pbtxt b/tensorflow/core/api_def/base_api/api_def_Erf.pbtxt
new file mode 100644
index 0000000000..408df8a633
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Erf.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Erf"
+ summary: "Computes the Gauss error function of `x` element-wise."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Erfc.pbtxt b/tensorflow/core/api_def/base_api/api_def_Erfc.pbtxt
new file mode 100644
index 0000000000..ad70def47f
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Erfc.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Erfc"
+ summary: "Computes the complementary error function of `x` element-wise."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Exit.pbtxt b/tensorflow/core/api_def/base_api/api_def_Exit.pbtxt
new file mode 100644
index 0000000000..ec97b7ac04
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Exit.pbtxt
@@ -0,0 +1,20 @@
+op {
+ graph_op_name: "Exit"
+ visibility: HIDDEN
+ in_arg {
+ name: "data"
+ description: <<END
+The tensor to be made available to the parent frame.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+The same tensor as `data`.
+END
+ }
+ summary: "Exits the current frame to its parent frame."
+ description: <<END
+Exit makes its input `data` available to the parent frame.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Exp.pbtxt b/tensorflow/core/api_def/base_api/api_def_Exp.pbtxt
new file mode 100644
index 0000000000..dd1e3d5dfc
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Exp.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Exp"
+ summary: "Computes exponential of x element-wise. \\\\(y = e^x\\\\)."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ExpandDims.pbtxt b/tensorflow/core/api_def/base_api/api_def_ExpandDims.pbtxt
new file mode 100644
index 0000000000..7b9a03f0ea
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ExpandDims.pbtxt
@@ -0,0 +1,52 @@
+op {
+ graph_op_name: "ExpandDims"
+ in_arg {
+ name: "dim"
+ rename_to: "axis"
+ description: <<END
+0-D (scalar). Specifies the dimension index at which to
+expand the shape of `input`. Must be in the range
+`[-rank(input) - 1, rank(input)]`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Contains the same data as `input`, but its shape has an additional
+dimension of size 1 added.
+END
+ }
+ summary: "Inserts a dimension of 1 into a tensor\'s shape."
+ description: <<END
+Given a tensor `input`, this operation inserts a dimension of 1 at the
+dimension index `dim` of `input`'s shape. The dimension index `dim` starts at
+zero; if you specify a negative number for `dim` it is counted backward from
+the end.
+
+This operation is useful if you want to add a batch dimension to a single
+element. For example, if you have a single image of shape `[height, width,
+channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,
+which will make the shape `[1, height, width, channels]`.
+
+Other examples:
+
+```
+# 't' is a tensor of shape [2]
+shape(expand_dims(t, 0)) ==> [1, 2]
+shape(expand_dims(t, 1)) ==> [2, 1]
+shape(expand_dims(t, -1)) ==> [2, 1]
+
+# 't2' is a tensor of shape [2, 3, 5]
+shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]
+shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]
+shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]
+```
+
+This operation requires that:
+
+`-1-input.dims() <= dim <= input.dims()`
+
+This operation is related to `squeeze()`, which removes dimensions of
+size 1.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Expm1.pbtxt b/tensorflow/core/api_def/base_api/api_def_Expm1.pbtxt
new file mode 100644
index 0000000000..a048f2aa8b
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Expm1.pbtxt
@@ -0,0 +1,7 @@
+op {
+ graph_op_name: "Expm1"
+ summary: "Computes exponential of x - 1 element-wise."
+ description: <<END
+I.e., \\(y = (\exp x) - 1\\).
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ExtractGlimpse.pbtxt b/tensorflow/core/api_def/base_api/api_def_ExtractGlimpse.pbtxt
new file mode 100644
index 0000000000..c10a1bb778
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ExtractGlimpse.pbtxt
@@ -0,0 +1,77 @@
+op {
+ graph_op_name: "ExtractGlimpse"
+ in_arg {
+ name: "input"
+ description: <<END
+A 4-D float tensor of shape `[batch_size, height, width, channels]`.
+END
+ }
+ in_arg {
+ name: "size"
+ description: <<END
+A 1-D tensor of 2 elements containing the size of the glimpses
+to extract. The glimpse height must be specified first, followed
+by the glimpse width.
+END
+ }
+ in_arg {
+ name: "offsets"
+ description: <<END
+A 2-D integer tensor of shape `[batch_size, 2]` containing
+the y, x locations of the center of each window.
+END
+ }
+ out_arg {
+ name: "glimpse"
+ description: <<END
+A tensor representing the glimpses `[batch_size,
+glimpse_height, glimpse_width, channels]`.
+END
+ }
+ attr {
+ name: "centered"
+ description: <<END
+indicates if the offset coordinates are centered relative to
+the image, in which case the (0, 0) offset is relative to the center
+of the input images. If false, the (0,0) offset corresponds to the
+upper left corner of the input images.
+END
+ }
+ attr {
+ name: "normalized"
+ description: <<END
+indicates if the offset coordinates are normalized.
+END
+ }
+ attr {
+ name: "uniform_noise"
+ description: <<END
+indicates if the noise should be generated using a
+uniform distribution or a Gaussian distribution.
+END
+ }
+ summary: "Extracts a glimpse from the input tensor."
+ description: <<END
+Returns a set of windows called glimpses extracted at locations
+`offsets` from the input tensor. If the windows only partially
+overlap the input, the non-overlapping areas will be filled with
+random noise.
+
+The result is a 4-D tensor of shape `[batch_size, glimpse_height,
+glimpse_width, channels]`. The channels and batch dimensions are the
+same as that of the input tensor. The height and width of the output
+windows are specified in the `size` parameter.
+
+The arguments `normalized` and `centered` control how the windows are built:
+
+* If the coordinates are normalized but not centered, 0.0 and 1.0
+ correspond to the minimum and maximum of each height and width
+ dimension.
+* If the coordinates are both normalized and centered, they range from
+ -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper
+ left corner, the lower right corner is located at (1.0, 1.0) and the
+ center is at (0, 0).
+* If the coordinates are not normalized they are interpreted as
+ numbers of pixels.
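+
+A minimal usage sketch (assuming the Python `tf.image.extract_glimpse` wrapper):
+
+```python
+images = tf.zeros([1, 100, 100, 3])
+offsets = tf.constant([[0.0, 0.0]])  # centered, normalized coordinates
+glimpse = tf.image.extract_glimpse(images, size=[20, 20], offsets=offsets,
+                                   centered=True, normalized=True)
+# glimpse has shape [1, 20, 20, 3], taken around the image center.
+```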
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ExtractImagePatches.pbtxt b/tensorflow/core/api_def/base_api/api_def_ExtractImagePatches.pbtxt
new file mode 100644
index 0000000000..712a3b0a0f
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ExtractImagePatches.pbtxt
@@ -0,0 +1,57 @@
+op {
+ graph_op_name: "ExtractImagePatches"
+ in_arg {
+ name: "images"
+ description: <<END
+4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.
+END
+ }
+ out_arg {
+ name: "patches"
+ description: <<END
+4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows *
+ksize_cols * depth]` containing image patches with size
+`ksize_rows x ksize_cols x depth` vectorized in the "depth" dimension. Note
+`out_rows` and `out_cols` are the dimensions of the output patches.
+END
+ }
+ attr {
+ name: "ksizes"
+ description: <<END
+The size of the sliding window for each dimension of `images`.
+END
+ }
+ attr {
+ name: "strides"
+ description: <<END
+1-D of length 4. How far the centers of two consecutive patches are in
+the images. Must be: `[1, stride_rows, stride_cols, 1]`.
+END
+ }
+ attr {
+ name: "rates"
+ description: <<END
+1-D of length 4. Must be: `[1, rate_rows, rate_cols, 1]`. This is the
+input stride, specifying how far two consecutive patch samples are in the
+input. Equivalent to extracting patches with
+`patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by
+subsampling them spatially by a factor of `rates`. This is equivalent to
+`rate` in dilated (a.k.a. Atrous) convolutions.
+END
+ }
+ attr {
+ name: "padding"
+ description: <<END
+The type of padding algorithm to use.
+
+We specify the size-related attributes as:
+
+```python
+ ksizes = [1, ksize_rows, ksize_cols, 1]
+ strides = [1, strides_rows, strides_cols, 1]
+ rates = [1, rates_rows, rates_cols, 1]
+```
+END
+ }
+ summary: "Extract `patches` from `images` and put them in the \"depth\" output dimension."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ExtractJpegShape.pbtxt b/tensorflow/core/api_def/base_api/api_def_ExtractJpegShape.pbtxt
new file mode 100644
index 0000000000..c604adf449
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ExtractJpegShape.pbtxt
@@ -0,0 +1,26 @@
+op {
+ graph_op_name: "ExtractJpegShape"
+ in_arg {
+ name: "contents"
+ description: <<END
+0-D. The JPEG-encoded image.
+END
+ }
+ out_arg {
+ name: "image_shape"
+ description: <<END
+1-D. The image shape with format [height, width, channels].
+END
+ }
+ attr {
+ name: "output_type"
+ description: <<END
+(Optional) The output type of the operation (int32 or int64).
+Defaults to int32.
+END
+ }
+ summary: "Extract the shape information of a JPEG-encoded image."
+ description: <<END
+This op only parses the image header, so it is much faster than DecodeJpeg.
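+
+For example (an illustrative sketch, assuming the Python
+`tf.image.extract_jpeg_shape` wrapper; the file path is hypothetical):
+
+```python
+contents = tf.read_file("image.jpg")  # hypothetical path
+shape = tf.image.extract_jpeg_shape(contents)  # ==> [height, width, channels]
+```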
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_F.pbtxt b/tensorflow/core/api_def/base_api/api_def_F.pbtxt
deleted file mode 100644
index 8c073d3369..0000000000
--- a/tensorflow/core/api_def/base_api/api_def_F.pbtxt
+++ /dev/null
@@ -1,411 +0,0 @@
-op {
- graph_op_name: "FFT"
- endpoint {
- name: "FFT"
- }
- summary: "Fast Fourier transform."
- description: <<END
-Computes the 1-dimensional discrete Fourier transform over the inner-most
-dimension of `input`.
-END
-}
-op {
- graph_op_name: "FFT2D"
- endpoint {
- name: "FFT2D"
- }
- summary: "2D fast Fourier transform."
- description: <<END
-Computes the 2-dimensional discrete Fourier transform over the inner-most
-2 dimensions of `input`.
-END
-}
-op {
- graph_op_name: "FFT3D"
- endpoint {
- name: "FFT3D"
- }
- summary: "3D fast Fourier transform."
- description: <<END
-Computes the 3-dimensional discrete Fourier transform over the inner-most 3
-dimensions of `input`.
-END
-}
-op {
- graph_op_name: "FIFOQueue"
- endpoint {
- name: "FIFOQueue"
- }
- summary: "A queue that produces elements in first-in first-out order."
-}
-op {
- graph_op_name: "FIFOQueueV2"
- endpoint {
- name: "FIFOQueueV2"
- }
- summary: "A queue that produces elements in first-in first-out order."
-}
-op {
- graph_op_name: "Fact"
- endpoint {
- name: "Fact"
- }
- summary: "Output a fact about factorials."
-}
-op {
- graph_op_name: "FakeQuantWithMinMaxArgs"
- endpoint {
- name: "FakeQuantWithMinMaxArgs"
- }
- summary: "Fake-quantize the \'inputs\' tensor, type float to \'outputs\' tensor of same type."
- description: <<END
-Attributes `[min; max]` define the clamping range for the `inputs` data.
-`inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`
-when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and
-then de-quantized and output as floats in `[min; max]` interval.
-`num_bits` is the bitwidth of the quantization; between 2 and 8, inclusive.
-
-Quantization is called fake since the output is still in floating point.
-END
-}
-op {
- graph_op_name: "FakeQuantWithMinMaxArgsGradient"
- endpoint {
- name: "FakeQuantWithMinMaxArgsGradient"
- }
- summary: "Compute gradients for a FakeQuantWithMinMaxArgs operation."
-}
-op {
- graph_op_name: "FakeQuantWithMinMaxVars"
- endpoint {
- name: "FakeQuantWithMinMaxVars"
- }
- summary: "Fake-quantize the \'inputs\' tensor of type float via global float scalars `min`"
- description: <<END
-and `max` to 'outputs' tensor of same shape as `inputs`.
-
-`[min; max]` define the clamping range for the `inputs` data.
-`inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`
-when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and
-then de-quantized and output as floats in `[min; max]` interval.
-`num_bits` is the bitwidth of the quantization; between 2 and 8, inclusive.
-
-This operation has a gradient and thus allows for training `min` and `max`
-values.
-END
-}
-op {
- graph_op_name: "FakeQuantWithMinMaxVarsGradient"
- endpoint {
- name: "FakeQuantWithMinMaxVarsGradient"
- }
- summary: "Compute gradients for a FakeQuantWithMinMaxVars operation."
-}
-op {
- graph_op_name: "FakeQuantWithMinMaxVarsPerChannel"
- endpoint {
- name: "FakeQuantWithMinMaxVarsPerChannel"
- }
- summary: "Fake-quantize the \'inputs\' tensor of type float and one of the shapes: `[d]`,"
- description: <<END
-`[b, d]` `[b, h, w, d]` via per-channel floats `min` and `max` of shape `[d]`
-to 'outputs' tensor of same shape as `inputs`.
-
-`[min; max]` define the clamping range for the `inputs` data.
-`inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`
-when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and
-then de-quantized and output as floats in `[min; max]` interval.
-`num_bits` is the bitwidth of the quantization; between 2 and 8, inclusive.
-
-This operation has a gradient and thus allows for training `min` and `max`
-values.
-END
-}
-op {
- graph_op_name: "FakeQuantWithMinMaxVarsPerChannelGradient"
- endpoint {
- name: "FakeQuantWithMinMaxVarsPerChannelGradient"
- }
- summary: "Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation."
-}
-op {
- graph_op_name: "FakeQueue"
- endpoint {
- name: "FakeQueue"
- }
- summary: "Deprecated. Do not use."
-}
-op {
- graph_op_name: "Fill"
- endpoint {
- name: "Fill"
- }
- summary: "Creates a tensor filled with a scalar value."
- description: <<END
-This operation creates a tensor of shape `dims` and fills it with `value`.
-
-For example:
-
-```
-# Output tensor has shape [2, 3].
-fill([2, 3], 9) ==> [[9, 9, 9]
- [9, 9, 9]]
-```
-END
-}
-op {
- graph_op_name: "FilterDataset"
- endpoint {
- name: "FilterDataset"
- }
- summary: "Creates a dataset containing elements of `input_dataset` matching `predicate`."
- description: <<END
-The `predicate` function must return a scalar boolean and accept the
-following arguments:
-
-* One tensor for each component of an element of `input_dataset`.
-* One tensor for each value in `other_arguments`.
-END
-}
-op {
- graph_op_name: "FixedLengthRecordDataset"
- endpoint {
- name: "FixedLengthRecordDataset"
- }
- summary: "Creates a dataset that emits the records from one or more binary files."
-}
-op {
- graph_op_name: "FixedLengthRecordReader"
- endpoint {
- name: "FixedLengthRecordReader"
- }
- summary: "A Reader that outputs fixed-length records from a file."
-}
-op {
- graph_op_name: "FixedLengthRecordReaderV2"
- endpoint {
- name: "FixedLengthRecordReaderV2"
- }
- summary: "A Reader that outputs fixed-length records from a file."
-}
-op {
- graph_op_name: "FixedUnigramCandidateSampler"
- endpoint {
- name: "FixedUnigramCandidateSampler"
- }
- summary: "Generates labels for candidate sampling with a learned unigram distribution."
- description: <<END
-A unigram sampler could use a fixed unigram distribution read from a
-file or passed in as an in-memory array instead of building up the distribution
-from data on the fly. There is also an option to skew the distribution by
-applying a distortion power to the weights.
-
-The vocabulary file should be in CSV-like format, with the last field
-being the weight associated with the word.
-
-For each batch, this op picks a single set of sampled candidate labels.
-
-The advantages of sampling candidates per-batch are simplicity and the
-possibility of efficient dense matrix multiplication. The disadvantage is that
-the sampled candidates must be chosen independently of the context and of the
-true labels.
-END
-}
-op {
- graph_op_name: "FlatMapDataset"
- endpoint {
- name: "FlatMapDataset"
- }
- summary: "Creates a dataset that applies `f` to the outputs of `input_dataset`."
- description: <<END
-Unlike MapDataset, the `f` in FlatMapDataset is expected to return a
-Dataset variant, and FlatMapDataset will flatten successive results
-into a single Dataset.
-END
-}
-op {
- graph_op_name: "Floor"
- endpoint {
- name: "Floor"
- }
- summary: "Returns element-wise largest integer not greater than x."
-}
-op {
- graph_op_name: "FloorDiv"
- endpoint {
- name: "FloorDiv"
- }
- summary: "Returns x // y element-wise."
- description: <<END
-*NOTE*: `FloorDiv` supports broadcasting. More about broadcasting
-[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-END
-}
-op {
- graph_op_name: "FloorMod"
- endpoint {
- name: "FloorMod"
- }
- summary: "Returns element-wise remainder of division. When `x < 0` xor `y < 0` is"
- description: <<END
-true, this follows Python semantics in that the result here is consistent
-with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) = x`.
-
-*NOTE*: `FloorMod` supports broadcasting. More about broadcasting
-[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-END
-}
-op {
- graph_op_name: "FractionalAvgPool"
- endpoint {
- name: "FractionalAvgPool"
- }
- summary: "Performs fractional average pooling on the input."
- description: <<END
-Fractional average pooling is similar to Fractional max pooling in the pooling
-region generation step. The only difference is that after pooling regions are
-generated, a mean operation is performed instead of a max operation in each
-pooling region.
-END
-}
-op {
- graph_op_name: "FractionalAvgPoolGrad"
- endpoint {
- name: "FractionalAvgPoolGrad"
- }
- summary: "Computes gradient of the FractionalAvgPool function."
- description: <<END
-Unlike FractionalMaxPoolGrad, we don't need to find arg_max for
-FractionalAvgPoolGrad, we just need to evenly back-propagate each element of
-out_backprop to those indices that form the same pooling cell. Therefore, we
-just need to know the shape of original input tensor, instead of the whole
-tensor.
-END
-}
-op {
- graph_op_name: "FractionalMaxPool"
- endpoint {
- name: "FractionalMaxPool"
- }
- summary: "Performs fractional max pooling on the input."
- description: <<END
-Fractional max pooling is slightly different than regular max pooling. In
-regular max pooling, you downsize an input set by taking the maximum value of
-smaller N x N subsections of the set (often 2x2), and try to reduce the set by
-a factor of N, where N is an integer. Fractional max pooling, as you might
-expect from the word "fractional", means that the overall reduction ratio N
-does not have to be an integer.
-
-The sizes of the pooling regions are generated randomly but are fairly uniform.
-For example, let's look at the height dimension, and the constraints on the
-list of rows that will be pool boundaries.
-
-First we define the following:
-
-1. input_row_length : the number of rows from the input set
-2. output_row_length : which will be smaller than the input
-3. alpha = input_row_length / output_row_length : our reduction ratio
-4. K = floor(alpha)
-5. row_pooling_sequence : this is the result list of pool boundary rows
-
-Then, row_pooling_sequence should satisfy:
-
-1. a[0] = 0 : the first value of the sequence is 0
-2. a[end] = input_row_length : the last value of the sequence is the size
-3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
-4. length(row_pooling_sequence) = output_row_length+1
-
-For more details on fractional max pooling, see this paper:
-[Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071)
-END
-}
-op {
- graph_op_name: "FractionalMaxPoolGrad"
- endpoint {
- name: "FractionalMaxPoolGrad"
- }
- summary: "Computes gradient of the FractionalMaxPool function."
-}
-op {
- graph_op_name: "FusedBatchNorm"
- endpoint {
- name: "FusedBatchNorm"
- }
- summary: "Batch normalization."
- description: <<END
-Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW".
-The size of 1D Tensors matches the dimension C of the 4D Tensors.
-END
-}
-op {
- graph_op_name: "FusedBatchNormGrad"
- endpoint {
- name: "FusedBatchNormGrad"
- }
- summary: "Gradient for batch normalization."
- description: <<END
-Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW".
-The size of 1D Tensors matches the dimension C of the 4D Tensors.
-END
-}
-op {
- graph_op_name: "FusedBatchNormGradV2"
- endpoint {
- name: "FusedBatchNormGradV2"
- }
- summary: "Gradient for batch normalization."
- description: <<END
-Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW".
-The size of 1D Tensors matches the dimension C of the 4D Tensors.
-END
-}
-op {
- graph_op_name: "FusedBatchNormV2"
- endpoint {
- name: "FusedBatchNormV2"
- }
- summary: "Batch normalization."
- description: <<END
-Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW".
-The size of 1D Tensors matches the dimension C of the 4D Tensors.
-END
-}
-op {
- graph_op_name: "FusedPadConv2D"
- endpoint {
- name: "FusedPadConv2D"
- }
- summary: "Performs a padding as a preprocess during a convolution."
- description: <<END
-Similar to FusedResizeAndPadConv2d, this op allows for an optimized
-implementation where the spatial padding transformation stage is fused with the
-im2col lookup, but in this case without the bilinear filtering required for
-resizing. Fusing the padding prevents the need to write out the intermediate
-results as whole tensors, reducing memory pressure, and we can get some latency
-gains by merging the transformation calculations.
-The data_format attribute for Conv2D isn't supported by this op, and 'NHWC'
-order is used instead.
-Internally this op uses a single per-graph scratch buffer, which means that it
-will block if multiple versions are being run in parallel. This is because this
-operator is primarily an optimization to minimize memory usage.
-END
-}
-op {
- graph_op_name: "FusedResizeAndPadConv2D"
- endpoint {
- name: "FusedResizeAndPadConv2D"
- }
- summary: "Performs a resize and padding as a preprocess during a convolution."
- description: <<END
-It's often possible to do spatial transformations more efficiently as part of
-the packing stage of a convolution, so this op allows for an optimized
-implementation where these stages are fused together. This prevents the need to
-write out the intermediate results as whole tensors, reducing memory pressure,
-and we can get some latency gains by merging the transformation calculations.
-The data_format attribute for Conv2D isn't supported by this op, and defaults to
-'NHWC' order.
-Internally this op uses a single per-graph scratch buffer, which means that it
-will block if multiple versions are being run in parallel. This is because this
-operator is primarily an optimization to minimize memory usage.
-END
-}
diff --git a/tensorflow/core/api_def/base_api/api_def_FFT.pbtxt b/tensorflow/core/api_def/base_api/api_def_FFT.pbtxt
new file mode 100644
index 0000000000..4e48d6c169
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_FFT.pbtxt
@@ -0,0 +1,25 @@
+op {
+ graph_op_name: "FFT"
+ in_arg {
+ name: "input"
+ description: <<END
+A complex64 tensor.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+A complex64 tensor of the same shape as `input`. The inner-most
+dimension of `input` is replaced with its 1D Fourier transform.
+
+@compatibility(numpy)
+Equivalent to np.fft.fft
+@end_compatibility
+END
+ }
+ summary: "Fast Fourier transform."
+ description: <<END
+Computes the 1-dimensional discrete Fourier transform over the inner-most
+dimension of `input`.
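+
+For example (an illustrative sketch, assuming the Python `tf.fft` wrapper):
+
+```python
+x = tf.ones([4], dtype=tf.complex64)
+tf.fft(x)  # ==> approximately [4+0j, 0, 0, 0]
+```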
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_FFT2D.pbtxt b/tensorflow/core/api_def/base_api/api_def_FFT2D.pbtxt
new file mode 100644
index 0000000000..555f8e6067
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_FFT2D.pbtxt
@@ -0,0 +1,25 @@
+op {
+ graph_op_name: "FFT2D"
+ in_arg {
+ name: "input"
+ description: <<END
+A complex64 tensor.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+A complex64 tensor of the same shape as `input`. The inner-most 2
+dimensions of `input` are replaced with their 2D Fourier transform.
+
+@compatibility(numpy)
+Equivalent to np.fft.fft2
+@end_compatibility
+END
+ }
+ summary: "2D fast Fourier transform."
+ description: <<END
+Computes the 2-dimensional discrete Fourier transform over the inner-most
+2 dimensions of `input`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_FFT3D.pbtxt b/tensorflow/core/api_def/base_api/api_def_FFT3D.pbtxt
new file mode 100644
index 0000000000..abd2e67bce
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_FFT3D.pbtxt
@@ -0,0 +1,25 @@
+op {
+ graph_op_name: "FFT3D"
+ in_arg {
+ name: "input"
+ description: <<END
+A complex64 tensor.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+A complex64 tensor of the same shape as `input`. The inner-most 3
+dimensions of `input` are replaced with their 3D Fourier transform.
+
+@compatibility(numpy)
+Equivalent to np.fft.fftn with 3 dimensions.
+@end_compatibility
+END
+ }
+ summary: "3D fast Fourier transform."
+ description: <<END
+Computes the 3-dimensional discrete Fourier transform over the inner-most 3
+dimensions of `input`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_FIFOQueue.pbtxt b/tensorflow/core/api_def/base_api/api_def_FIFOQueue.pbtxt
new file mode 100644
index 0000000000..751f73d66e
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_FIFOQueue.pbtxt
@@ -0,0 +1,47 @@
+op {
+ graph_op_name: "FIFOQueue"
+ visibility: SKIP
+ out_arg {
+ name: "handle"
+ description: <<END
+The handle to the queue.
+END
+ }
+ attr {
+ name: "component_types"
+ description: <<END
+The type of each component in a value.
+END
+ }
+ attr {
+ name: "shapes"
+ description: <<END
+The shape of each component in a value. The length of this attr must
+be either 0 or the same as the length of component_types. If the length of
+this attr is 0, the shapes of queue elements are not constrained, and
+only one element may be dequeued at a time.
+END
+ }
+ attr {
+ name: "capacity"
+ description: <<END
+The upper bound on the number of elements in this queue.
+Negative numbers mean no limit.
+END
+ }
+ attr {
+ name: "container"
+ description: <<END
+If non-empty, this queue is placed in the given container.
+Otherwise, a default container is used.
+END
+ }
+ attr {
+ name: "shared_name"
+ description: <<END
+If non-empty, this queue will be shared under the given name
+across multiple sessions.
+END
+ }
+ summary: "A queue that produces elements in first-in first-out order."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_FIFOQueueV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_FIFOQueueV2.pbtxt
new file mode 100644
index 0000000000..2f7b84ff2a
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_FIFOQueueV2.pbtxt
@@ -0,0 +1,49 @@
+op {
+ graph_op_name: "FIFOQueueV2"
+ endpoint {
+ name: "FIFOQueue"
+ }
+ out_arg {
+ name: "handle"
+ description: <<END
+The handle to the queue.
+END
+ }
+ attr {
+ name: "component_types"
+ description: <<END
+The type of each component in a value.
+END
+ }
+ attr {
+ name: "shapes"
+ description: <<END
+The shape of each component in a value. The length of this attr must
+be either 0 or the same as the length of component_types. If the length of
+this attr is 0, the shapes of queue elements are not constrained, and
+only one element may be dequeued at a time.
+END
+ }
+ attr {
+ name: "capacity"
+ description: <<END
+The upper bound on the number of elements in this queue.
+Negative numbers mean no limit.
+END
+ }
+ attr {
+ name: "container"
+ description: <<END
+If non-empty, this queue is placed in the given container.
+Otherwise, a default container is used.
+END
+ }
+ attr {
+ name: "shared_name"
+ description: <<END
+If non-empty, this queue will be shared under the given name
+across multiple sessions.
+END
+ }
+ summary: "A queue that produces elements in first-in first-out order."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Fact.pbtxt b/tensorflow/core/api_def/base_api/api_def_Fact.pbtxt
new file mode 100644
index 0000000000..9aad4aac32
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Fact.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Fact"
+ summary: "Output a fact about factorials."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_FakeQuantWithMinMaxArgs.pbtxt b/tensorflow/core/api_def/base_api/api_def_FakeQuantWithMinMaxArgs.pbtxt
new file mode 100644
index 0000000000..561c86ddf6
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_FakeQuantWithMinMaxArgs.pbtxt
@@ -0,0 +1,13 @@
+op {
+ graph_op_name: "FakeQuantWithMinMaxArgs"
+ summary: "Fake-quantize the \'inputs\' tensor, type float to \'outputs\' tensor of same type."
+ description: <<END
+Attributes `[min; max]` define the clamping range for the `inputs` data.
+`inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`
+when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and
+then de-quantized and output as floats in `[min; max]` interval.
+`num_bits` is the bitwidth of the quantization; between 2 and 8, inclusive.
+
+Quantization is called fake since the output is still in floating point.
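+
+As an illustrative sketch of the quantize/de-quantize round trip (assuming
+min = 0.0, max = 1.0, num_bits = 8, narrow_range = false; the actual kernel
+also nudges the range so that zero is exactly representable):
+
+```python
+scale = (1.0 - 0.0) / (2**8 - 1)  # quantization step, 1/255
+quantized = round(0.4 / scale)    # -> 102, an integer in [0, 255]
+dequantized = quantized * scale   # -> ~0.4, a float back in [min, max]
+```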
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_FakeQuantWithMinMaxArgsGradient.pbtxt b/tensorflow/core/api_def/base_api/api_def_FakeQuantWithMinMaxArgsGradient.pbtxt
new file mode 100644
index 0000000000..5241acc559
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_FakeQuantWithMinMaxArgsGradient.pbtxt
@@ -0,0 +1,23 @@
+op {
+ graph_op_name: "FakeQuantWithMinMaxArgsGradient"
+ in_arg {
+ name: "gradients"
+ description: <<END
+Backpropagated gradients above the FakeQuantWithMinMaxArgs operation.
+END
+ }
+ in_arg {
+ name: "inputs"
+ description: <<END
+Values passed as inputs to the FakeQuantWithMinMaxArgs operation.
+END
+ }
+ out_arg {
+ name: "backprops"
+ description: <<END
+Backpropagated gradients below the FakeQuantWithMinMaxArgs operation:
+`gradients * (inputs >= min && inputs <= max)`.
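+
+For example, a small illustrative sketch (assuming min = 0.0, max = 1.0):
+
+```python
+inputs    = [-0.5, 0.2, 0.7, 1.3]
+gradients = [ 1.0, 1.0, 1.0, 1.0]
+# Only in-range inputs pass the gradient through:
+backprops = [ 0.0, 1.0, 1.0, 0.0]
+```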
+END
+ }
+ summary: "Compute gradients for a FakeQuantWithMinMaxArgs operation."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_FakeQuantWithMinMaxVars.pbtxt b/tensorflow/core/api_def/base_api/api_def_FakeQuantWithMinMaxVars.pbtxt
new file mode 100644
index 0000000000..2713c01b27
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_FakeQuantWithMinMaxVars.pbtxt
@@ -0,0 +1,16 @@
+op {
+ graph_op_name: "FakeQuantWithMinMaxVars"
+ summary: "Fake-quantize the \'inputs\' tensor of type float via global float scalars `min` and `max`."
+ description: <<END
+Fake-quantizes the 'inputs' tensor of type float via global float scalars
+`min` and `max` to an 'outputs' tensor of the same shape as `inputs`.
+
+`[min; max]` define the clamping range for the `inputs` data.
+`inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`
+when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and
+then de-quantized and output as floats in `[min; max]` interval.
+`num_bits` is the bitwidth of the quantization; between 2 and 8, inclusive.
+
+This operation has a gradient and thus allows for training `min` and `max`
+values.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_FakeQuantWithMinMaxVarsGradient.pbtxt b/tensorflow/core/api_def/base_api/api_def_FakeQuantWithMinMaxVarsGradient.pbtxt
new file mode 100644
index 0000000000..d07d3b333b
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_FakeQuantWithMinMaxVarsGradient.pbtxt
@@ -0,0 +1,50 @@
+op {
+ graph_op_name: "FakeQuantWithMinMaxVarsGradient"
+ in_arg {
+ name: "gradients"
+ description: <<END
+Backpropagated gradients above the FakeQuantWithMinMaxVars operation.
+END
+ }
+ in_arg {
+ name: "inputs"
+ description: <<END
+Values passed as inputs to the FakeQuantWithMinMaxVars operation.
+min, max: Quantization interval, scalar floats.
+END
+ }
+ out_arg {
+ name: "backprops_wrt_input"
+ description: <<END
+Backpropagated gradients w.r.t. inputs:
+`gradients * (inputs >= min && inputs <= max)`.
+END
+ }
+ out_arg {
+ name: "backprop_wrt_min"
+ description: <<END
+Backpropagated gradients w.r.t. min parameter:
+`sum(gradients * (inputs < min))`.
+END
+ }
+ out_arg {
+ name: "backprop_wrt_max"
+ description: <<END
+Backpropagated gradients w.r.t. max parameter:
+`sum(gradients * (inputs > max))`.
+END
+ }
+ attr {
+ name: "num_bits"
+ description: <<END
+The bitwidth of the quantization; between 2 and 8, inclusive.
+END
+ }
+ attr {
+ name: "narrow_range"
+ description: <<END
+Whether to quantize into 2^num_bits - 1 distinct values.
+END
+ }
+ summary: "Compute gradients for a FakeQuantWithMinMaxVars operation."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_FakeQuantWithMinMaxVarsPerChannel.pbtxt b/tensorflow/core/api_def/base_api/api_def_FakeQuantWithMinMaxVarsPerChannel.pbtxt
new file mode 100644
index 0000000000..e293d4d084
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_FakeQuantWithMinMaxVarsPerChannel.pbtxt
@@ -0,0 +1,17 @@
+op {
+ graph_op_name: "FakeQuantWithMinMaxVarsPerChannel"
+ summary: "Fake-quantize the \'inputs\' tensor of type float via per-channel floats `min` and `max`."
+ description: <<END
+Fake-quantizes the 'inputs' tensor, which has one of the shapes `[d]`, `[b, d]`,
+or `[b, h, w, d]`, via per-channel floats `min` and `max` of shape `[d]` to an
+'outputs' tensor of the same shape as `inputs`.
+
+`[min; max]` define the clamping range for the `inputs` data.
+`inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`
+when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and
+then de-quantized and output as floats in `[min; max]` interval.
+`num_bits` is the bitwidth of the quantization; between 2 and 8, inclusive.
+
+This operation has a gradient and thus allows for training `min` and `max`
+values.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_FakeQuantWithMinMaxVarsPerChannelGradient.pbtxt b/tensorflow/core/api_def/base_api/api_def_FakeQuantWithMinMaxVarsPerChannelGradient.pbtxt
new file mode 100644
index 0000000000..8a4ab368b5
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_FakeQuantWithMinMaxVarsPerChannelGradient.pbtxt
@@ -0,0 +1,53 @@
+op {
+ graph_op_name: "FakeQuantWithMinMaxVarsPerChannelGradient"
+ in_arg {
+ name: "gradients"
+ description: <<END
+Backpropagated gradients above the FakeQuantWithMinMaxVars operation,
+shape one of: `[d]`, `[b, d]`, `[b, h, w, d]`.
+END
+ }
+ in_arg {
+ name: "inputs"
+ description: <<END
+Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape
+same as `gradients`.
+min, max: Quantization interval, floats of shape `[d]`.
+END
+ }
+ out_arg {
+ name: "backprops_wrt_input"
+ description: <<END
+Backpropagated gradients w.r.t. inputs, shape same as
+`inputs`: `gradients * (inputs >= min && inputs <= max)`.
+END
+ }
+ out_arg {
+ name: "backprop_wrt_min"
+ description: <<END
+Backpropagated gradients w.r.t. min parameter, shape `[d]`:
+`sum_per_d(gradients * (inputs < min))`.
+END
+ }
+ out_arg {
+ name: "backprop_wrt_max"
+ description: <<END
+Backpropagated gradients w.r.t. max parameter, shape `[d]`:
+`sum_per_d(gradients * (inputs > max))`.
+END
+ }
+ attr {
+ name: "num_bits"
+ description: <<END
+The bitwidth of the quantization; between 2 and 8, inclusive.
+END
+ }
+ attr {
+ name: "narrow_range"
+ description: <<END
+Whether to quantize into 2^num_bits - 1 distinct values.
+END
+ }
+ summary: "Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_FakeQueue.pbtxt b/tensorflow/core/api_def/base_api/api_def_FakeQueue.pbtxt
new file mode 100644
index 0000000000..224862246e
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_FakeQueue.pbtxt
@@ -0,0 +1,5 @@
+op {
+ graph_op_name: "FakeQueue"
+ visibility: SKIP
+ summary: "Deprecated. Do not use."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Fill.pbtxt b/tensorflow/core/api_def/base_api/api_def_Fill.pbtxt
new file mode 100644
index 0000000000..58262a385c
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Fill.pbtxt
@@ -0,0 +1,31 @@
+op {
+ graph_op_name: "Fill"
+ in_arg {
+ name: "dims"
+ description: <<END
+1-D. Represents the shape of the output tensor.
+END
+ }
+ in_arg {
+ name: "value"
+ description: <<END
+0-D (scalar). Value to fill the returned tensor.
+
+@compatibility(numpy)
+Equivalent to np.full
+@end_compatibility
+END
+ }
+ summary: "Creates a tensor filled with a scalar value."
+ description: <<END
+This operation creates a tensor of shape `dims` and fills it with `value`.
+
+For example:
+
+```
+# Output tensor has shape [2, 3].
+fill([2, 3], 9) ==> [[9, 9, 9]
+ [9, 9, 9]]
+```
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_FilterDataset.pbtxt b/tensorflow/core/api_def/base_api/api_def_FilterDataset.pbtxt
new file mode 100644
index 0000000000..fd60c0f378
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_FilterDataset.pbtxt
@@ -0,0 +1,24 @@
+op {
+ graph_op_name: "FilterDataset"
+ in_arg {
+ name: "other_arguments"
+ description: <<END
+A list of tensors, typically values that were captured when
+building a closure for `predicate`.
+END
+ }
+ attr {
+ name: "predicate"
+ description: <<END
+A function returning a scalar boolean.
+END
+ }
+ summary: "Creates a dataset containing elements of `input_dataset` matching `predicate`."
+ description: <<END
+The `predicate` function must return a scalar boolean and accept the
+following arguments:
+
+* One tensor for each component of an element of `input_dataset`.
+* One tensor for each value in `other_arguments`.
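+
+For example, a minimal sketch using the Python `tf.data` wrapper of this op:
+
+```python
+dataset = tf.data.Dataset.range(10)
+# The lambda plays the role of `predicate`; only even elements are kept.
+dataset = dataset.filter(lambda x: tf.equal(x % 2, 0))
+```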
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_FixedLengthRecordDataset.pbtxt b/tensorflow/core/api_def/base_api/api_def_FixedLengthRecordDataset.pbtxt
new file mode 100644
index 0000000000..651b84d0d6
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_FixedLengthRecordDataset.pbtxt
@@ -0,0 +1,37 @@
+op {
+ graph_op_name: "FixedLengthRecordDataset"
+ in_arg {
+ name: "filenames"
+ description: <<END
+A scalar or a vector containing the name(s) of the file(s) to be
+read.
+END
+ }
+ in_arg {
+ name: "header_bytes"
+ description: <<END
+A scalar representing the number of bytes to skip at the
+beginning of a file.
+END
+ }
+ in_arg {
+ name: "record_bytes"
+ description: <<END
+A scalar representing the number of bytes in each record.
+END
+ }
+ in_arg {
+ name: "footer_bytes"
+ description: <<END
+A scalar representing the number of bytes to skip at the end
+of a file.
+END
+ }
+ in_arg {
+ name: "buffer_size"
+ description: <<END
+A scalar representing the number of bytes to buffer. Must be > 0.
+END
+ }
+ summary: "Creates a dataset that emits the records from one or more binary files."
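+ description: <<END
+A minimal usage sketch via the Python wrapper (the file name and byte sizes
+are hypothetical):
+
+```python
+dataset = tf.data.FixedLengthRecordDataset(
+    filenames=["data.bin"], record_bytes=8, header_bytes=4)
+```
+END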
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_FixedLengthRecordReader.pbtxt b/tensorflow/core/api_def/base_api/api_def_FixedLengthRecordReader.pbtxt
new file mode 100644
index 0000000000..0d7f3cbb43
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_FixedLengthRecordReader.pbtxt
@@ -0,0 +1,50 @@
+op {
+ graph_op_name: "FixedLengthRecordReader"
+ visibility: SKIP
+ out_arg {
+ name: "reader_handle"
+ description: <<END
+The handle to reference the Reader.
+END
+ }
+ attr {
+ name: "header_bytes"
+ description: <<END
+Number of bytes in the header, defaults to 0.
+END
+ }
+ attr {
+ name: "record_bytes"
+ description: <<END
+Number of bytes in the record.
+END
+ }
+ attr {
+ name: "footer_bytes"
+ description: <<END
+Number of bytes in the footer, defaults to 0.
+END
+ }
+ attr {
+ name: "hop_bytes"
+ description: <<END
+Number of bytes to hop before each read. Default of 0 means using
+record_bytes.
+END
+ }
+ attr {
+ name: "container"
+ description: <<END
+If non-empty, this reader is placed in the given container.
+Otherwise, a default container is used.
+END
+ }
+ attr {
+ name: "shared_name"
+ description: <<END
+If non-empty, this reader is named in the given bucket
+with this shared_name. Otherwise, the node name is used instead.
+END
+ }
+ summary: "A Reader that outputs fixed-length records from a file."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_FixedLengthRecordReaderV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_FixedLengthRecordReaderV2.pbtxt
new file mode 100644
index 0000000000..9a9067a592
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_FixedLengthRecordReaderV2.pbtxt
@@ -0,0 +1,59 @@
+op {
+ graph_op_name: "FixedLengthRecordReaderV2"
+ endpoint {
+ name: "FixedLengthRecordReader"
+ }
+ out_arg {
+ name: "reader_handle"
+ description: <<END
+The handle to reference the Reader.
+END
+ }
+ attr {
+ name: "header_bytes"
+ description: <<END
+Number of bytes in the header, defaults to 0.
+END
+ }
+ attr {
+ name: "record_bytes"
+ description: <<END
+Number of bytes in the record.
+END
+ }
+ attr {
+ name: "footer_bytes"
+ description: <<END
+Number of bytes in the footer, defaults to 0.
+END
+ }
+ attr {
+ name: "hop_bytes"
+ description: <<END
+Number of bytes to hop before each read. Default of 0 means using
+record_bytes.
+END
+ }
+ attr {
+ name: "container"
+ description: <<END
+If non-empty, this reader is placed in the given container.
+Otherwise, a default container is used.
+END
+ }
+ attr {
+ name: "shared_name"
+ description: <<END
+If non-empty, this reader is named in the given bucket
+with this shared_name. Otherwise, the node name is used instead.
+END
+ }
+ attr {
+ name: "encoding"
+ description: <<END
+The type of encoding for the file. Currently ZLIB and GZIP
+are supported. Defaults to none.
+END
+ }
+ summary: "A Reader that outputs fixed-length records from a file."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_FixedUnigramCandidateSampler.pbtxt b/tensorflow/core/api_def/base_api/api_def_FixedUnigramCandidateSampler.pbtxt
new file mode 100644
index 0000000000..6c40b16122
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_FixedUnigramCandidateSampler.pbtxt
@@ -0,0 +1,144 @@
+op {
+ graph_op_name: "FixedUnigramCandidateSampler"
+ in_arg {
+ name: "true_classes"
+ description: <<END
+A batch_size * num_true matrix, in which each row contains the
+IDs of the num_true target_classes in the corresponding original label.
+END
+ }
+ out_arg {
+ name: "sampled_candidates"
+ description: <<END
+A vector of length num_sampled, in which each element is
+the ID of a sampled candidate.
+END
+ }
+ out_arg {
+ name: "true_expected_count"
+ description: <<END
+A batch_size * num_true matrix, representing
+the number of times each candidate is expected to occur in a batch
+of sampled candidates. If unique=true, then this is a probability.
+END
+ }
+ out_arg {
+ name: "sampled_expected_count"
+ description: <<END
+A vector of length num_sampled, in which each element represents the number
+of times the corresponding sampled candidate is expected to occur in a batch
+of sampled candidates. If unique=true, then this is a probability.
+END
+ }
+ attr {
+ name: "num_true"
+ description: <<END
+Number of true labels per context.
+END
+ }
+ attr {
+ name: "num_sampled"
+ description: <<END
+Number of candidates to randomly sample.
+END
+ }
+ attr {
+ name: "unique"
+ description: <<END
+If unique is true, we sample with rejection, so that all sampled
+candidates in a batch are unique. This requires some approximation to
+estimate the post-rejection sampling probabilities.
+END
+ }
+ attr {
+ name: "range_max"
+ description: <<END
+The sampler will sample integers from the interval [0, range_max).
+END
+ }
+ attr {
+ name: "vocab_file"
+ description: <<END
+Each valid line in this file (which should have a CSV-like format)
+corresponds to a valid word ID. IDs are in sequential order, starting from
+num_reserved_ids. The last entry in each line is expected to be a value
+corresponding to the count or relative probability. Exactly one of vocab_file
+and unigrams needs to be passed to this op.
+END
+ }
+ attr {
+ name: "distortion"
+ description: <<END
+The distortion is used to skew the unigram probability distribution.
+Each weight is first raised to the distortion's power before adding to the
+internal unigram distribution. As a result, distortion = 1.0 gives regular
+unigram sampling (as defined by the vocab file), and distortion = 0.0 gives
+a uniform distribution.
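+
+For example, with weights [10.0, 1.0], a distortion of 0.5 yields relative
+weights [sqrt(10), 1.0] ~= [3.16, 1.0], flattening the distribution.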
+END
+ }
+ attr {
+ name: "num_reserved_ids"
+ description: <<END
+Optionally, some reserved IDs can be added in the range
+[0, num_reserved_ids) by users. One use case is that a special unknown
+word token is used as ID 0. These IDs will have a sampling probability of 0.
+END
+ }
+ attr {
+ name: "num_shards"
+ description: <<END
+A sampler can be used to sample from a subset of the original range
+in order to speed up the whole computation through parallelism. This parameter
+(together with 'shard') indicates the number of partitions that are being
+used in the overall computation.
+END
+ }
+ attr {
+ name: "shard"
+ description: <<END
+A sampler can be used to sample from a subset of the original range
+in order to speed up the whole computation through parallelism. This parameter
+(together with 'num_shards') indicates the particular partition number of a
+sampler op, when partitioning is being used.
+END
+ }
+ attr {
+ name: "unigrams"
+ description: <<END
+A list of unigram counts or probabilities, one per ID in sequential
+order. Exactly one of vocab_file and unigrams should be passed to this op.
+END
+ }
+ attr {
+ name: "seed"
+ description: <<END
+If either seed or seed2 are set to be non-zero, the random number
+generator is seeded by the given seed. Otherwise, it is seeded by a
+random seed.
+END
+ }
+ attr {
+ name: "seed2"
+ description: <<END
+A second seed to avoid seed collision.
+END
+ }
+ summary: "Generates labels for candidate sampling with a learned unigram distribution."
+ description: <<END
+A unigram sampler could use a fixed unigram distribution read from a
+file or passed in as an in-memory array instead of building up the distribution
+from data on the fly. There is also an option to skew the distribution by
+applying a distortion power to the weights.
+
+The vocabulary file should be in CSV-like format, with the last field
+being the weight associated with the word.
+
+For each batch, this op picks a single set of sampled candidate labels.
+
+The advantages of sampling candidates per-batch are simplicity and the
+possibility of efficient dense matrix multiplication. The disadvantage is that
+the sampled candidates must be chosen independently of the context and of the
+true labels.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_FlatMapDataset.pbtxt b/tensorflow/core/api_def/base_api/api_def_FlatMapDataset.pbtxt
new file mode 100644
index 0000000000..1936119c50
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_FlatMapDataset.pbtxt
@@ -0,0 +1,17 @@
+op {
+ graph_op_name: "FlatMapDataset"
+ attr {
+ name: "f"
+ description: <<END
+A function mapping elements of `input_dataset`, concatenated with
+`other_arguments`, to a Dataset variant that contains elements matching
+`output_types` and `output_shapes`.
+END
+ }
+ summary: "Creates a dataset that applies `f` to the outputs of `input_dataset`."
+ description: <<END
+Unlike MapDataset, the `f` in FlatMapDataset is expected to return a
+Dataset variant, and FlatMapDataset will flatten successive results
+into a single Dataset.
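+
+For example, a minimal sketch using the Python `tf.data` wrapper:
+
+```python
+dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
+# `f` returns a small Dataset per element; results are flattened into one.
+dataset = dataset.flat_map(
+    lambda x: tf.data.Dataset.from_tensors(x).repeat(2))
+```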
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Floor.pbtxt b/tensorflow/core/api_def/base_api/api_def_Floor.pbtxt
new file mode 100644
index 0000000000..ecb697cc7a
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Floor.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Floor"
+ summary: "Returns element-wise largest integer not greater than x."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_FloorDiv.pbtxt b/tensorflow/core/api_def/base_api/api_def_FloorDiv.pbtxt
new file mode 100644
index 0000000000..913d4a1a52
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_FloorDiv.pbtxt
@@ -0,0 +1,8 @@
+op {
+ graph_op_name: "FloorDiv"
+ summary: "Returns x // y element-wise."
+ description: <<END
+*NOTE*: `FloorDiv` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
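+
+For example, `floor_div(7, 2) = 3` and `floor_div(-7, 2) = -4`, matching
+Python's `//` operator rather than C-style truncating division.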
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_FloorMod.pbtxt b/tensorflow/core/api_def/base_api/api_def_FloorMod.pbtxt
new file mode 100644
index 0000000000..c3c0be91ae
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_FloorMod.pbtxt
@@ -0,0 +1,11 @@
+op {
+ graph_op_name: "FloorMod"
+ summary: "Returns element-wise remainder of division."
+ description: <<END
+When `x < 0` xor `y < 0` is true, this follows Python semantics: the result
+is consistent with a flooring divide, i.e. `floor(x / y) * y + mod(x, y) = x`.
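+
+For example, `floor_mod(-7, 3) = 2`, since `floor(-7 / 3) = -3` and
+`(-3) * 3 + 2 = -7`; this matches Python's `-7 % 3`.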
+
+*NOTE*: `FloorMod` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_FractionalAvgPool.pbtxt b/tensorflow/core/api_def/base_api/api_def_FractionalAvgPool.pbtxt
new file mode 100644
index 0000000000..03495b7ea5
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_FractionalAvgPool.pbtxt
@@ -0,0 +1,90 @@
+op {
+ graph_op_name: "FractionalAvgPool"
+ in_arg {
+ name: "value"
+ description: <<END
+4-D with shape `[batch, height, width, channels]`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+output tensor after fractional avg pooling.
+END
+ }
+ out_arg {
+ name: "row_pooling_sequence"
+ description: <<END
+row pooling sequence, needed to calculate gradient.
+END
+ }
+ out_arg {
+ name: "col_pooling_sequence"
+ description: <<END
+column pooling sequence, needed to calculate gradient.
+END
+ }
+ attr {
+ name: "pooling_ratio"
+ description: <<END
+Pooling ratio for each dimension of `value`. Currently only the row and col
+dimensions are supported, and each ratio must be >= 1.0. For example, a valid
+pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements
+must be 1.0 because we don't allow pooling on the batch and channels
+dimensions; 1.44 and 1.73 are the pooling ratios on the height and width
+dimensions, respectively.
+END
+ }
+ attr {
+ name: "pseudo_random"
+ description: <<END
+When set to True, generates the pooling sequence in a
+pseudorandom fashion; otherwise, in a random fashion. See the paper [Benjamin
+Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for the
+difference between pseudorandom and random.
+END
+ }
+ attr {
+ name: "overlapping"
+ description: <<END
+When set to True, the values at the boundary of adjacent pooling cells are
+used by both cells when pooling. For example:
+
+`index 0 1 2 3 4`
+
+`value 20 5 16 3 7`
+
+If the pooling sequence is [0, 2, 4], then 16, at index 2, will be used twice.
+The result would be [41/3, 26/3] for fractional avg pooling.
+END
+ }
+ attr {
+ name: "deterministic"
+ description: <<END
+When set to True, a fixed pooling region will be used when
+iterating over a FractionalAvgPool node in the computation graph. Mainly used
+in unit tests to make FractionalAvgPool deterministic.
+END
+ }
+ attr {
+ name: "seed"
+ description: <<END
+If either seed or seed2 are set to be non-zero, the random number
+generator is seeded by the given seed. Otherwise, it is seeded by a
+random seed.
+END
+ }
+ attr {
+ name: "seed2"
+ description: <<END
+A second seed to avoid seed collision.
+END
+ }
+ summary: "Performs fractional average pooling on the input."
+ description: <<END
+Fractional average pooling is similar to Fractional max pooling in the pooling
+region generation step. The only difference is that after pooling regions are
+generated, a mean operation is performed instead of a max operation in each
+pooling region.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_FractionalAvgPoolGrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_FractionalAvgPoolGrad.pbtxt
new file mode 100644
index 0000000000..a0cda03295
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_FractionalAvgPoolGrad.pbtxt
@@ -0,0 +1,59 @@
+op {
+ graph_op_name: "FractionalAvgPoolGrad"
+ visibility: HIDDEN
+ in_arg {
+ name: "orig_input_tensor_shape"
+ description: <<END
+Original input tensor shape for `fractional_avg_pool`
+END
+ }
+ in_arg {
+ name: "out_backprop"
+ description: <<END
+4-D with shape `[batch, height, width, channels]`. Gradients
+w.r.t. the output of `fractional_avg_pool`.
+END
+ }
+ in_arg {
+ name: "row_pooling_sequence"
+ description: <<END
+row pooling sequence, which forms the pooling region together with
+col_pooling_sequence.
+END
+ }
+ in_arg {
+ name: "col_pooling_sequence"
+ description: <<END
+column pooling sequence, which forms the pooling region together with
+row_pooling_sequence.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+4-D. Gradients w.r.t. the input of `fractional_avg_pool`.
+END
+ }
+ attr {
+ name: "overlapping"
+ description: <<END
+When set to True, the values at the boundary of adjacent pooling cells are
+used by both cells when pooling. For example:
+
+`index 0 1 2 3 4`
+
+`value 20 5 16 3 7`
+
+If the pooling sequence is [0, 2, 4], then 16, at index 2, will be used twice.
+The result would be [41/3, 26/3] for fractional avg pooling.
+END
+ }
+ summary: "Computes gradient of the FractionalAvgPool function."
+ description: <<END
+Unlike FractionalMaxPoolGrad, we don't need to find arg_max for
+FractionalAvgPoolGrad; we just need to evenly back-propagate each element of
+out_backprop to those indices that form the same pooling cell. Therefore, we
+only need to know the shape of the original input tensor, instead of the
+whole tensor.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_FractionalMaxPool.pbtxt b/tensorflow/core/api_def/base_api/api_def_FractionalMaxPool.pbtxt
new file mode 100644
index 0000000000..efc7719329
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_FractionalMaxPool.pbtxt
@@ -0,0 +1,114 @@
+op {
+ graph_op_name: "FractionalMaxPool"
+ in_arg {
+ name: "value"
+ description: <<END
+4-D with shape `[batch, height, width, channels]`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+output tensor after fractional max pooling.
+END
+ }
+ out_arg {
+ name: "row_pooling_sequence"
+ description: <<END
+row pooling sequence, needed to calculate gradient.
+END
+ }
+ out_arg {
+ name: "col_pooling_sequence"
+ description: <<END
+column pooling sequence, needed to calculate gradient.
+END
+ }
+ attr {
+ name: "pooling_ratio"
+ description: <<END
+Pooling ratio for each dimension of `value`. Currently only the row and col
+dimensions are supported, and each ratio must be >= 1.0. For example, a valid
+pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements
+must be 1.0 because we don't allow pooling on the batch and channels
+dimensions; 1.44 and 1.73 are the pooling ratios on the height and width
+dimensions, respectively.
+END
+ }
+ attr {
+ name: "pseudo_random"
+ description: <<END
+When set to True, generates the pooling sequence in a
+pseudorandom fashion; otherwise, in a random fashion. See the paper [Benjamin
+Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for the
+difference between pseudorandom and random.
+END
+ }
+ attr {
+ name: "overlapping"
+ description: <<END
+When set to True, the values at the boundary of adjacent pooling cells are
+used by both cells when pooling. For example:
+
+`index 0 1 2 3 4`
+
+`value 20 5 16 3 7`
+
+If the pooling sequence is [0, 2, 4], then 16, at index 2, will be used twice.
+The result would be [20, 16] for fractional max pooling.
+END
+ }
+ attr {
+ name: "deterministic"
+ description: <<END
+When set to True, a fixed pooling region will be used when
+iterating over a FractionalMaxPool node in the computation graph. Mainly used
+in unit tests to make FractionalMaxPool deterministic.
+END
+ }
+ attr {
+ name: "seed"
+ description: <<END
+If either seed or seed2 are set to be non-zero, the random number
+generator is seeded by the given seed. Otherwise, it is seeded by a
+random seed.
+END
+ }
+ attr {
+ name: "seed2"
+ description: <<END
+A second seed to avoid seed collision.
+END
+ }
+ summary: "Performs fractional max pooling on the input."
+ description: <<END
+Fractional max pooling is slightly different from regular max pooling. In
+regular max pooling, you downsize an input set by taking the maximum value of
+smaller N x N subsections of the set (often 2x2), and try to reduce the set by
+a factor of N, where N is an integer. Fractional max pooling, as you might
+expect from the word "fractional", means that the overall reduction ratio N
+does not have to be an integer.
+
+The sizes of the pooling regions are generated randomly but are fairly uniform.
+For example, let's look at the height dimension, and the constraints on the
+list of rows that will be pool boundaries.
+
+First we define the following:
+
+1. input_row_length : the number of rows from the input set
+2. output_row_length : the number of output rows, which will be smaller than the input
+3. alpha = input_row_length / output_row_length : our reduction ratio
+4. K = floor(alpha)
+5. row_pooling_sequence : this is the result list of pool boundary rows
+
+Then, row_pooling_sequence should satisfy:
+
+1. a[0] = 0 : the first value of the sequence is 0
+2. a[end] = input_row_length : the last value of the sequence is the size
+3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are of size K or K+1
+4. length(row_pooling_sequence) = output_row_length+1
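+
+A worked sketch: with input_row_length = 10 and output_row_length = 4, we get
+alpha = 2.5 and K = 2, so [0, 2, 5, 8, 10] is a valid row_pooling_sequence:
+its intervals are 2, 3, 3, 2 and its length is output_row_length + 1 = 5.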
+
+For more details on fractional max pooling, see this paper:
+[Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071)
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_FractionalMaxPoolGrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_FractionalMaxPoolGrad.pbtxt
new file mode 100644
index 0000000000..d7faa5b24a
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_FractionalMaxPoolGrad.pbtxt
@@ -0,0 +1,58 @@
+op {
+ graph_op_name: "FractionalMaxPoolGrad"
+ visibility: HIDDEN
+ in_arg {
+ name: "orig_input"
+ description: <<END
+Original input for `fractional_max_pool`
+END
+ }
+ in_arg {
+ name: "orig_output"
+ description: <<END
+Original output for `fractional_max_pool`
+END
+ }
+ in_arg {
+ name: "out_backprop"
+ description: <<END
+4-D with shape `[batch, height, width, channels]`. Gradients
+w.r.t. the output of `fractional_max_pool`.
+END
+ }
+ in_arg {
+ name: "row_pooling_sequence"
+ description: <<END
+row pooling sequence, which forms the pooling region together with
+col_pooling_sequence.
+END
+ }
+ in_arg {
+ name: "col_pooling_sequence"
+ description: <<END
+column pooling sequence, which forms the pooling region together with
+row_pooling_sequence.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+4-D. Gradients w.r.t. the input of `fractional_max_pool`.
+END
+ }
+ attr {
+ name: "overlapping"
+ description: <<END
+When set to True, the values at the boundary of adjacent pooling cells are
+used by both cells when pooling. For example:
+
+`index 0 1 2 3 4`
+
+`value 20 5 16 3 7`
+
+If the pooling sequence is [0, 2, 4], then 16, at index 2, will be used twice.
+The result would be [20, 16] for fractional max pooling.
+END
+ }
+ summary: "Computes gradient of the FractionalMaxPool function."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_FusedBatchNorm.pbtxt b/tensorflow/core/api_def/base_api/api_def_FusedBatchNorm.pbtxt
new file mode 100644
index 0000000000..8f065d96fc
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_FusedBatchNorm.pbtxt
@@ -0,0 +1,99 @@
+op {
+ graph_op_name: "FusedBatchNorm"
+ in_arg {
+ name: "x"
+ description: <<END
+A 4D Tensor for input data.
+END
+ }
+ in_arg {
+ name: "scale"
+ description: <<END
+A 1D Tensor for scaling factor, to scale the normalized x.
+END
+ }
+ in_arg {
+ name: "offset"
+ description: <<END
+A 1D Tensor for offset, to shift to the normalized x.
+END
+ }
+ in_arg {
+ name: "mean"
+ description: <<END
+A 1D Tensor for population mean. Used for inference only;
+must be empty for training.
+END
+ }
+ in_arg {
+ name: "variance"
+ description: <<END
+A 1D Tensor for population variance. Used for inference only;
+must be empty for training.
+END
+ }
+ out_arg {
+ name: "y"
+ description: <<END
+A 4D Tensor for output data.
+END
+ }
+ out_arg {
+ name: "batch_mean"
+ description: <<END
+A 1D Tensor for the computed batch mean, to be used by TensorFlow
+to compute the running mean.
+END
+ }
+ out_arg {
+ name: "batch_variance"
+ description: <<END
+A 1D Tensor for the computed batch variance, to be used by
+TensorFlow to compute the running variance.
+END
+ }
+ out_arg {
+ name: "reserve_space_1"
+ description: <<END
+A 1D Tensor for the computed batch mean, to be reused
+in the gradient computation.
+END
+ }
+ out_arg {
+ name: "reserve_space_2"
+ description: <<END
+A 1D Tensor for the computed batch variance (inverted variance
+in the cuDNN case), to be reused in the gradient computation.
+END
+ }
+ attr {
+ name: "T"
+ description: <<END
+The data type for the elements of input and output Tensors.
+END
+ }
+ attr {
+ name: "epsilon"
+ description: <<END
+A small float number added to the variance of x.
+END
+ }
+ attr {
+ name: "data_format"
+ description: <<END
+The data format for x and y. Either "NHWC" (default) or "NCHW".
+END
+ }
+ attr {
+ name: "is_training"
+ description: <<END
+A bool value to indicate the operation is for training (default)
+or inference.
+END
+ }
+ summary: "Batch normalization."
+ description: <<END
+Note that the sizes of the 4D Tensors are defined by either "NHWC" or "NCHW".
+The sizes of the 1D Tensors match the dimension C of the 4D Tensors.
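+
+As a reminder of the underlying math (a sketch; the fused kernel's exact
+numerics may differ), during training each element is normalized as:
+
+```python
+y = scale * (x - batch_mean) / sqrt(batch_variance + epsilon) + offset
+```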
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_FusedBatchNormGrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_FusedBatchNormGrad.pbtxt
new file mode 100644
index 0000000000..3d436e3690
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_FusedBatchNormGrad.pbtxt
@@ -0,0 +1,102 @@
+op {
+ graph_op_name: "FusedBatchNormGrad"
+ in_arg {
+ name: "y_backprop"
+ description: <<END
+A 4D Tensor for the gradient with respect to y.
+END
+ }
+ in_arg {
+ name: "x"
+ description: <<END
+A 4D Tensor for input data.
+END
+ }
+ in_arg {
+ name: "scale"
+ description: <<END
+A 1D Tensor for scaling factor, to scale the normalized x.
+END
+ }
+ in_arg {
+ name: "reserve_space_1"
+ description: <<END
+When is_training is True, a 1D Tensor for the computed batch
+mean to be reused in gradient computation. When is_training is
+False, a 1D Tensor for the population mean to be reused in both
+1st and 2nd order gradient computation.
+END
+ }
+ in_arg {
+ name: "reserve_space_2"
+ description: <<END
+When is_training is True, a 1D Tensor for the computed batch
+variance (inverted variance in the cuDNN case) to be reused in
+gradient computation. When is_training is False, a 1D Tensor
+for the population variance to be reused in both 1st and 2nd
+order gradient computation.
+END
+ }
+ out_arg {
+ name: "x_backprop"
+ description: <<END
+A 4D Tensor for the gradient with respect to x.
+END
+ }
+ out_arg {
+ name: "scale_backprop"
+ description: <<END
+A 1D Tensor for the gradient with respect to scale.
+END
+ }
+ out_arg {
+ name: "offset_backprop"
+ description: <<END
+A 1D Tensor for the gradient with respect to offset.
+END
+ }
+ out_arg {
+ name: "reserve_space_3"
+ description: <<END
+Unused placeholder to match the mean input in FusedBatchNorm.
+END
+ }
+ out_arg {
+ name: "reserve_space_4"
+ description: <<END
+Unused placeholder to match the variance input
+in FusedBatchNorm.
+END
+ }
+ attr {
+ name: "T"
+ description: <<END
+The data type for the elements of input and output Tensors.
+END
+ }
+ attr {
+ name: "epsilon"
+ description: <<END
+A small float number added to the variance of x.
+END
+ }
+ attr {
+ name: "data_format"
+ description: <<END
+The data format for y_backprop, x, x_backprop.
+Either "NHWC" (default) or "NCHW".
+END
+ }
+ attr {
+ name: "is_training"
+ description: <<END
+A bool value to indicate the operation is for training (default)
+or inference.
+END
+ }
+ summary: "Gradient for batch normalization."
+ description: <<END
+Note that the sizes of the 4D Tensors are defined by either "NHWC" or "NCHW".
+The sizes of the 1D Tensors match the dimension C of the 4D Tensors.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_FusedBatchNormGradV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_FusedBatchNormGradV2.pbtxt
new file mode 100644
index 0000000000..d8f04093a4
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_FusedBatchNormGradV2.pbtxt
@@ -0,0 +1,108 @@
+op {
+ graph_op_name: "FusedBatchNormGradV2"
+ in_arg {
+ name: "y_backprop"
+ description: <<END
+A 4D Tensor for the gradient with respect to y.
+END
+ }
+ in_arg {
+ name: "x"
+ description: <<END
+A 4D Tensor for input data.
+END
+ }
+ in_arg {
+ name: "scale"
+ description: <<END
+A 1D Tensor for scaling factor, to scale the normalized x.
+END
+ }
+ in_arg {
+ name: "reserve_space_1"
+ description: <<END
+When is_training is True, a 1D Tensor for the computed batch
+mean to be reused in gradient computation. When is_training is
+False, a 1D Tensor for the population mean to be reused in both
+1st and 2nd order gradient computation.
+END
+ }
+ in_arg {
+ name: "reserve_space_2"
+ description: <<END
+When is_training is True, a 1D Tensor for the computed batch
+variance (inverted variance in the cuDNN case) to be reused in
+gradient computation. When is_training is False, a 1D Tensor
+for the population variance to be reused in both 1st and 2nd
+order gradient computation.
+END
+ }
+ out_arg {
+ name: "x_backprop"
+ description: <<END
+A 4D Tensor for the gradient with respect to x.
+END
+ }
+ out_arg {
+ name: "scale_backprop"
+ description: <<END
+A 1D Tensor for the gradient with respect to scale.
+END
+ }
+ out_arg {
+ name: "offset_backprop"
+ description: <<END
+A 1D Tensor for the gradient with respect to offset.
+END
+ }
+ out_arg {
+ name: "reserve_space_3"
+ description: <<END
+Unused placeholder to match the mean input in FusedBatchNorm.
+END
+ }
+ out_arg {
+ name: "reserve_space_4"
+ description: <<END
+Unused placeholder to match the variance input
+in FusedBatchNorm.
+END
+ }
+ attr {
+ name: "T"
+ description: <<END
+The data type for the elements of input and output Tensors.
+END
+ }
+ attr {
+ name: "U"
+ description: <<END
+The data type for the scale, offset, mean, and variance.
+END
+ }
+ attr {
+ name: "epsilon"
+ description: <<END
+A small float number added to the variance of x.
+END
+ }
+ attr {
+ name: "data_format"
+ description: <<END
+The data format for y_backprop, x, x_backprop.
+Either "NHWC" (default) or "NCHW".
+END
+ }
+ attr {
+ name: "is_training"
+ description: <<END
+A bool value to indicate the operation is for training (default)
+or inference.
+END
+ }
+ summary: "Gradient for batch normalization."
+ description: <<END
+Note that the sizes of the 4D Tensors are defined by either "NHWC" or "NCHW".
+The sizes of the 1D Tensors match the dimension C of the 4D Tensors.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_FusedBatchNormV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_FusedBatchNormV2.pbtxt
new file mode 100644
index 0000000000..df14adf49d
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_FusedBatchNormV2.pbtxt
@@ -0,0 +1,105 @@
+op {
+ graph_op_name: "FusedBatchNormV2"
+ in_arg {
+ name: "x"
+ description: <<END
+A 4D Tensor for input data.
+END
+ }
+ in_arg {
+ name: "scale"
+ description: <<END
+A 1D Tensor for scaling factor, to scale the normalized x.
+END
+ }
+ in_arg {
+ name: "offset"
+ description: <<END
+A 1D Tensor for offset, to shift to the normalized x.
+END
+ }
+ in_arg {
+ name: "mean"
+ description: <<END
+A 1D Tensor for population mean. Used for inference only;
+must be empty for training.
+END
+ }
+ in_arg {
+ name: "variance"
+ description: <<END
+A 1D Tensor for population variance. Used for inference only;
+must be empty for training.
+END
+ }
+ out_arg {
+ name: "y"
+ description: <<END
+A 4D Tensor for output data.
+END
+ }
+ out_arg {
+ name: "batch_mean"
+ description: <<END
+A 1D Tensor for the computed batch mean, to be used by TensorFlow
+to compute the running mean.
+END
+ }
+ out_arg {
+ name: "batch_variance"
+ description: <<END
+A 1D Tensor for the computed batch variance, to be used by
+TensorFlow to compute the running variance.
+END
+ }
+ out_arg {
+ name: "reserve_space_1"
+ description: <<END
+A 1D Tensor for the computed batch mean, to be reused
+in the gradient computation.
+END
+ }
+ out_arg {
+ name: "reserve_space_2"
+ description: <<END
+A 1D Tensor for the computed batch variance (inverted variance
+in the cuDNN case), to be reused in the gradient computation.
+END
+ }
+ attr {
+ name: "T"
+ description: <<END
+The data type for the elements of input and output Tensors.
+END
+ }
+ attr {
+ name: "U"
+ description: <<END
+The data type for the scale, offset, mean, and variance.
+END
+ }
+ attr {
+ name: "epsilon"
+ description: <<END
+A small float number added to the variance of x.
+END
+ }
+ attr {
+ name: "data_format"
+ description: <<END
+The data format for x and y. Either "NHWC" (default) or "NCHW".
+END
+ }
+ attr {
+ name: "is_training"
+ description: <<END
+A bool value to indicate the operation is for training (default)
+or inference.
+END
+ }
+ summary: "Batch normalization."
+ description: <<END
+Note that the sizes of the 4D Tensors are defined by either "NHWC" or "NCHW".
+The sizes of the 1D Tensors match the dimension C of the 4D Tensors.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_FusedPadConv2D.pbtxt b/tensorflow/core/api_def/base_api/api_def_FusedPadConv2D.pbtxt
new file mode 100644
index 0000000000..5c2c3eb0c5
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_FusedPadConv2D.pbtxt
@@ -0,0 +1,50 @@
+op {
+ graph_op_name: "FusedPadConv2D"
+ in_arg {
+ name: "input"
+ description: <<END
+4-D with shape `[batch, in_height, in_width, in_channels]`.
+END
+ }
+ in_arg {
+ name: "paddings"
+ description: <<END
+A two-column matrix specifying the padding sizes. The number of
+rows must be the same as the rank of `input`.
+END
+ }
+ in_arg {
+ name: "filter"
+ description: <<END
+4-D with shape
+`[filter_height, filter_width, in_channels, out_channels]`.
+END
+ }
+ attr {
+ name: "strides"
+ description: <<END
+1-D of length 4. The stride of the sliding window for each dimension
+of `input`. Must be in the same order as the dimension specified with format.
+END
+ }
+ attr {
+ name: "padding"
+ description: <<END
+The type of padding algorithm to use.
+END
+ }
+ summary: "Performs a padding as a preprocess during a convolution."
+ description: <<END
+Similar to FusedResizeAndPadConv2D, this op allows for an optimized
+implementation where the spatial padding transformation stage is fused with the
+im2col lookup, but in this case without the bilinear filtering required for
+resizing. Fusing the padding prevents the need to write out the intermediate
+results as whole tensors, reducing memory pressure, and we can get some latency
+gains by merging the transformation calculations.
+The data_format attribute for Conv2D isn't supported by this op, and 'NHWC'
+order is used instead.
+Internally this op uses a single per-graph scratch buffer, which means that it
+will block if multiple versions are being run in parallel. This is because this
+operator is primarily an optimization to minimize memory usage.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_FusedResizeAndPadConv2D.pbtxt b/tensorflow/core/api_def/base_api/api_def_FusedResizeAndPadConv2D.pbtxt
new file mode 100644
index 0000000000..a72f2bfe5f
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_FusedResizeAndPadConv2D.pbtxt
@@ -0,0 +1,64 @@
+op {
+ graph_op_name: "FusedResizeAndPadConv2D"
+ in_arg {
+ name: "input"
+ description: <<END
+4-D with shape `[batch, in_height, in_width, in_channels]`.
+END
+ }
+ in_arg {
+ name: "size"
+ description: <<END
+A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The
+new size for the images.
+END
+ }
+ in_arg {
+ name: "paddings"
+ description: <<END
+A two-column matrix specifying the padding sizes. The number of
+rows must be the same as the rank of `input`.
+END
+ }
+ in_arg {
+ name: "filter"
+ description: <<END
+4-D with shape
+`[filter_height, filter_width, in_channels, out_channels]`.
+END
+ }
+ attr {
+ name: "resize_align_corners"
+ description: <<END
+If true, the input is rescaled by (new_height - 1) / (height - 1), which
+exactly aligns the 4 corners of the input and resized images. If false, it is
+rescaled by new_height / height. The width dimension is treated similarly.
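+For example, resizing height 3 to height 5 with align_corners uses scale
+(5 - 1) / (3 - 1) = 2, mapping input rows 0, 1, 2 to output rows 0, 2, 4.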
+END
+ }
+ attr {
+ name: "strides"
+ description: <<END
+1-D of length 4. The stride of the sliding window for each dimension
+of `input`. Must be in the same order as the dimension specified with format.
+END
+ }
+ attr {
+ name: "padding"
+ description: <<END
+The type of padding algorithm to use.
+END
+ }
+ summary: "Performs a resize and padding as a preprocess during a convolution."
+ description: <<END
+It's often possible to do spatial transformations more efficiently as part of
+the packing stage of a convolution, so this op allows for an optimized
+implementation where these stages are fused together. This prevents the need to
+write out the intermediate results as whole tensors, reducing memory pressure,
+and we can get some latency gains by merging the transformation calculations.
+The data_format attribute for Conv2D isn't supported by this op, and defaults to
+'NHWC' order.
+Internally this op uses a single per-graph scratch buffer, which means that it
+will block if multiple versions are being run in parallel. This is because this
+operator is primarily an optimization to minimize memory usage.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_G.pbtxt b/tensorflow/core/api_def/base_api/api_def_G.pbtxt
deleted file mode 100644
index 343d505718..0000000000
--- a/tensorflow/core/api_def/base_api/api_def_G.pbtxt
+++ /dev/null
@@ -1,257 +0,0 @@
-op {
- graph_op_name: "Gather"
- endpoint {
- name: "Gather"
- }
- summary: "Gather slices from `params` according to `indices`."
- description: <<END
-`indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
-Produces an output tensor with shape `indices.shape + params.shape[1:]` where:
-
-```python
- # Scalar indices
- output[:, ..., :] = params[indices, :, ... :]
-
- # Vector indices
- output[i, :, ..., :] = params[indices[i], :, ... :]
-
- # Higher rank indices
- output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]
-```
-
-If `indices` is a permutation and `len(indices) == params.shape[0]` then
-this operation will permute `params` accordingly.
-
-`validate_indices`: DEPRECATED. If this operation is assigned to CPU, values in
-`indices` are always validated to be within range. If assigned to GPU,
-out-of-bound indices result in safe but unspecified behavior, which may include
-raising an error.
-
-<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
-<img style="width:100%" src="https://www.tensorflow.org/images/Gather.png" alt>
-</div>
-END
-}
-op {
- graph_op_name: "GatherNd"
- endpoint {
- name: "GatherNd"
- }
- summary: "Gather slices from `params` into a Tensor with shape specified by `indices`."
- description: <<END
-`indices` is an K-dimensional integer tensor, best thought of as a
-(K-1)-dimensional tensor of indices into `params`, where each element defines a
-slice of `params`:
-
- output[i_0, ..., i_{K-2}] = params[indices[i0, ..., i_{K-2}]]
-
-Whereas in @{tf.gather} `indices` defines slices into the first
-dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the
-first `N` dimensions of `params`, where `N = indices.shape[-1]`.
-
-The last dimension of `indices` can be at most the rank of
-`params`:
-
- indices.shape[-1] <= params.rank
-
-The last dimension of `indices` corresponds to elements
-(if `indices.shape[-1] == params.rank`) or slices
-(if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]`
-of `params`. The output tensor has shape
-
- indices.shape[:-1] + params.shape[indices.shape[-1]:]
-
-Some examples below.
-
-Simple indexing into a matrix:
-
-```python
- indices = [[0, 0], [1, 1]]
- params = [['a', 'b'], ['c', 'd']]
- output = ['a', 'd']
-```
-
-Slice indexing into a matrix:
-
-```python
- indices = [[1], [0]]
- params = [['a', 'b'], ['c', 'd']]
- output = [['c', 'd'], ['a', 'b']]
-```
-
-Indexing into a 3-tensor:
-
-```python
- indices = [[1]]
- params = [[['a0', 'b0'], ['c0', 'd0']],
- [['a1', 'b1'], ['c1', 'd1']]]
- output = [[['a1', 'b1'], ['c1', 'd1']]]
-
-
- indices = [[0, 1], [1, 0]]
- params = [[['a0', 'b0'], ['c0', 'd0']],
- [['a1', 'b1'], ['c1', 'd1']]]
- output = [['c0', 'd0'], ['a1', 'b1']]
-
-
- indices = [[0, 0, 1], [1, 0, 1]]
- params = [[['a0', 'b0'], ['c0', 'd0']],
- [['a1', 'b1'], ['c1', 'd1']]]
- output = ['b0', 'b1']
-```
-
-Batched indexing into a matrix:
-
-```python
- indices = [[[0, 0]], [[0, 1]]]
- params = [['a', 'b'], ['c', 'd']]
- output = [['a'], ['b']]
-```
-
-Batched slice indexing into a matrix:
-
-```python
- indices = [[[1]], [[0]]]
- params = [['a', 'b'], ['c', 'd']]
- output = [[['c', 'd']], [['a', 'b']]]
-```
-
-Batched indexing into a 3-tensor:
-
-```python
- indices = [[[1]], [[0]]]
- params = [[['a0', 'b0'], ['c0', 'd0']],
- [['a1', 'b1'], ['c1', 'd1']]]
- output = [[[['a1', 'b1'], ['c1', 'd1']]],
- [[['a0', 'b0'], ['c0', 'd0']]]]
-
- indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]
- params = [[['a0', 'b0'], ['c0', 'd0']],
- [['a1', 'b1'], ['c1', 'd1']]]
- output = [[['c0', 'd0'], ['a1', 'b1']],
- [['a0', 'b0'], ['c1', 'd1']]]
-
-
- indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]
- params = [[['a0', 'b0'], ['c0', 'd0']],
- [['a1', 'b1'], ['c1', 'd1']]]
- output = [['b0', 'b1'], ['d0', 'c1']]
-```
-END
-}
-op {
- graph_op_name: "GatherV2"
- endpoint {
- name: "GatherV2"
- }
- summary: "Gather slices from `params` axis `axis` according to `indices`."
- description: <<END
-`indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
-Produces an output tensor with shape `params.shape[:axis] + indices.shape +
-params.shape[axis + 1:]` where:
-
-```python
- # Scalar indices (output is rank(params) - 1).
- output[a_0, ..., a_n, b_0, ..., b_n] =
- params[a_0, ..., a_n, indices, b_0, ..., b_n]
-
- # Vector indices (output is rank(params)).
- output[a_0, ..., a_n, i, b_0, ..., b_n] =
- params[a_0, ..., a_n, indices[i], b_0, ..., b_n]
-
- # Higher rank indices (output is rank(params) + rank(indices) - 1).
- output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] =
- params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n]
-```
-
-<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
-<img style="width:100%" src="https://www.tensorflow.org/images/Gather.png" alt>
-</div>
-END
-}
-op {
- graph_op_name: "GenerateVocabRemapping"
- endpoint {
- name: "GenerateVocabRemapping"
- }
- summary: "Given a path to new and old vocabulary files, returns a remapping Tensor of"
- description: <<END
-length `num_new_vocab`, where `remapping[i]` contains the row number in the old
-vocabulary that corresponds to row `i` in the new vocabulary (starting at line
-`new_vocab_offset` and up to `num_new_vocab` entities), or `-1` if entry `i`
-in the new vocabulary is not in the old vocabulary. `num_vocab_offset` enables
-use in the partitioned variable case, and should generally be set through
-examining partitioning info. The format of the files should be a text file,
-with each line containing a single entity within the vocabulary.
-
-For example, with `new_vocab_file` a text file containing each of the following
-elements on a single line: `[f0, f1, f2, f3]`, old_vocab_file = [f1, f0, f3],
-`num_new_vocab = 3, new_vocab_offset = 1`, the returned remapping would be
-`[0, -1, 2]`.
-
-The op also returns a count of how many entries in the new vocabulary
-were present in the old vocabulary, which is used to calculate the number of
-values to initialize in a weight matrix remapping
-
-This functionality can be used to remap both row vocabularies (typically,
-features) and column vocabularies (typically, classes) from TensorFlow
-checkpoints. Note that the partitioning logic relies on contiguous vocabularies
-corresponding to div-partitioned variables. Moreover, the underlying remapping
-uses an IndexTable (as opposed to an inexact CuckooTable), so client code should
-use the corresponding index_table_from_file() as the FeatureColumn framework
-does (as opposed to tf.feature_to_id(), which uses a CuckooTable).
-END
-}
-op {
- graph_op_name: "GetSessionHandle"
- endpoint {
- name: "GetSessionHandle"
- }
- summary: "Store the input tensor in the state of the current session."
-}
-op {
- graph_op_name: "GetSessionHandleV2"
- endpoint {
- name: "GetSessionHandleV2"
- }
- summary: "Store the input tensor in the state of the current session."
-}
-op {
- graph_op_name: "GetSessionTensor"
- endpoint {
- name: "GetSessionTensor"
- }
- summary: "Get the value of the tensor specified by its handle."
-}
-op {
- graph_op_name: "Greater"
- endpoint {
- name: "Greater"
- }
- summary: "Returns the truth value of (x > y) element-wise."
- description: <<END
-*NOTE*: `Greater` supports broadcasting. More about broadcasting
-[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-END
-}
-op {
- graph_op_name: "GreaterEqual"
- endpoint {
- name: "GreaterEqual"
- }
- summary: "Returns the truth value of (x >= y) element-wise."
- description: <<END
-*NOTE*: `GreaterEqual` supports broadcasting. More about broadcasting
-[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-END
-}
-op {
- graph_op_name: "GroupByWindowDataset"
- endpoint {
- name: "GroupByWindowDataset"
- }
- summary: "Creates a dataset that computes a windowed group-by on `input_dataset`."
- description: <<END
-// TODO(mrry): Support non-int64 keys.
-END
-}
diff --git a/tensorflow/core/api_def/base_api/api_def_Gather.pbtxt b/tensorflow/core/api_def/base_api/api_def_Gather.pbtxt
new file mode 100644
index 0000000000..6dcf2252ce
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Gather.pbtxt
@@ -0,0 +1,31 @@
+op {
+ graph_op_name: "Gather"
+ summary: "Gather slices from `params` according to `indices`."
+ description: <<END
+`indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
+Produces an output tensor with shape `indices.shape + params.shape[1:]` where:
+
+```python
+ # Scalar indices
+ output[:, ..., :] = params[indices, :, ... :]
+
+ # Vector indices
+ output[i, :, ..., :] = params[indices[i], :, ... :]
+
+ # Higher rank indices
+ output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]
+```
+
+If `indices` is a permutation and `len(indices) == params.shape[0]` then
+this operation will permute `params` accordingly.
+
+`validate_indices`: DEPRECATED. If this operation is assigned to CPU, values in
+`indices` are always validated to be within range. If assigned to GPU,
+out-of-bound indices result in safe but unspecified behavior, which may include
+raising an error.
+
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="https://www.tensorflow.org/images/Gather.png" alt>
+</div>
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_GatherNd.pbtxt b/tensorflow/core/api_def/base_api/api_def_GatherNd.pbtxt
new file mode 100644
index 0000000000..c7f8b6c21b
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_GatherNd.pbtxt
@@ -0,0 +1,123 @@
+op {
+ graph_op_name: "GatherNd"
+ in_arg {
+ name: "params"
+ description: <<END
+The tensor from which to gather values.
+END
+ }
+ in_arg {
+ name: "indices"
+ description: <<END
+Index tensor.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Values from `params` gathered from indices given by `indices`, with
+shape `indices.shape[:-1] + params.shape[indices.shape[-1]:]`.
+END
+ }
+ summary: "Gather slices from `params` into a Tensor with shape specified by `indices`."
+ description: <<END
+`indices` is a K-dimensional integer tensor, best thought of as a
+(K-1)-dimensional tensor of indices into `params`, where each element defines a
+slice of `params`:
+
+    output[i_0, ..., i_{K-2}] = params[indices[i_0, ..., i_{K-2}]]
+
+Whereas in @{tf.gather} `indices` defines slices into the first
+dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the
+first `N` dimensions of `params`, where `N = indices.shape[-1]`.
+
+The last dimension of `indices` can be at most the rank of
+`params`:
+
+ indices.shape[-1] <= params.rank
+
+The last dimension of `indices` corresponds to elements
+(if `indices.shape[-1] == params.rank`) or slices
+(if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]`
+of `params`. The output tensor has shape
+
+ indices.shape[:-1] + params.shape[indices.shape[-1]:]
+
+Some examples are shown below.
+
+Simple indexing into a matrix:
+
+```python
+ indices = [[0, 0], [1, 1]]
+ params = [['a', 'b'], ['c', 'd']]
+ output = ['a', 'd']
+```
+
+Slice indexing into a matrix:
+
+```python
+ indices = [[1], [0]]
+ params = [['a', 'b'], ['c', 'd']]
+ output = [['c', 'd'], ['a', 'b']]
+```
+
+Indexing into a 3-tensor:
+
+```python
+ indices = [[1]]
+ params = [[['a0', 'b0'], ['c0', 'd0']],
+ [['a1', 'b1'], ['c1', 'd1']]]
+ output = [[['a1', 'b1'], ['c1', 'd1']]]
+
+
+ indices = [[0, 1], [1, 0]]
+ params = [[['a0', 'b0'], ['c0', 'd0']],
+ [['a1', 'b1'], ['c1', 'd1']]]
+ output = [['c0', 'd0'], ['a1', 'b1']]
+
+
+ indices = [[0, 0, 1], [1, 0, 1]]
+ params = [[['a0', 'b0'], ['c0', 'd0']],
+ [['a1', 'b1'], ['c1', 'd1']]]
+ output = ['b0', 'b1']
+```
+
+Batched indexing into a matrix:
+
+```python
+ indices = [[[0, 0]], [[0, 1]]]
+ params = [['a', 'b'], ['c', 'd']]
+ output = [['a'], ['b']]
+```
+
+Batched slice indexing into a matrix:
+
+```python
+ indices = [[[1]], [[0]]]
+ params = [['a', 'b'], ['c', 'd']]
+ output = [[['c', 'd']], [['a', 'b']]]
+```
+
+Batched indexing into a 3-tensor:
+
+```python
+ indices = [[[1]], [[0]]]
+ params = [[['a0', 'b0'], ['c0', 'd0']],
+ [['a1', 'b1'], ['c1', 'd1']]]
+ output = [[[['a1', 'b1'], ['c1', 'd1']]],
+ [[['a0', 'b0'], ['c0', 'd0']]]]
+
+ indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]
+ params = [[['a0', 'b0'], ['c0', 'd0']],
+ [['a1', 'b1'], ['c1', 'd1']]]
+ output = [[['c0', 'd0'], ['a1', 'b1']],
+ [['a0', 'b0'], ['c1', 'd1']]]
+
+
+ indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]
+ params = [[['a0', 'b0'], ['c0', 'd0']],
+ [['a1', 'b1'], ['c1', 'd1']]]
+ output = [['b0', 'b1'], ['d0', 'c1']]
+```
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_GatherV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_GatherV2.pbtxt
new file mode 100644
index 0000000000..c020176a3b
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_GatherV2.pbtxt
@@ -0,0 +1,54 @@
+op {
+ graph_op_name: "GatherV2"
+ in_arg {
+ name: "params"
+ description: <<END
+The tensor from which to gather values. Must be at least rank
+`axis + 1`.
+END
+ }
+ in_arg {
+ name: "indices"
+ description: <<END
+Index tensor. Must be in range `[0, params.shape[axis])`.
+END
+ }
+ in_arg {
+ name: "axis"
+ description: <<END
+The axis in `params` to gather `indices` from. Defaults to the first
+dimension. Supports negative indices.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Values from `params` gathered from indices given by `indices`, with
+shape `params.shape[:axis] + indices.shape + params.shape[axis + 1:]`.
+END
+ }
+ summary: "Gather slices from `params` axis `axis` according to `indices`."
+ description: <<END
+`indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
+Produces an output tensor with shape `params.shape[:axis] + indices.shape +
+params.shape[axis + 1:]` where:
+
+```python
+ # Scalar indices (output is rank(params) - 1).
+ output[a_0, ..., a_n, b_0, ..., b_n] =
+ params[a_0, ..., a_n, indices, b_0, ..., b_n]
+
+ # Vector indices (output is rank(params)).
+ output[a_0, ..., a_n, i, b_0, ..., b_n] =
+ params[a_0, ..., a_n, indices[i], b_0, ..., b_n]
+
+ # Higher rank indices (output is rank(params) + rank(indices) - 1).
+  output[a_0, ..., a_n, i, ..., j, b_0, ..., b_n] =
+ params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n]
+```
+
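+For instance, gathering along `axis = 1` selects columns (a hedged example
+with hypothetical values):
+
+```python
+# tensor `params` is [[1, 2, 3],
+#                     [4, 5, 6]]
+tf.gather(params, indices=[2, 0], axis=1)  # ==> [[3, 1], [6, 4]]
+```
+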
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="https://www.tensorflow.org/images/Gather.png" alt>
+</div>
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_GenerateVocabRemapping.pbtxt b/tensorflow/core/api_def/base_api/api_def_GenerateVocabRemapping.pbtxt
new file mode 100644
index 0000000000..085acf7ff1
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_GenerateVocabRemapping.pbtxt
@@ -0,0 +1,68 @@
+op {
+ graph_op_name: "GenerateVocabRemapping"
+ in_arg {
+ name: "new_vocab_file"
+ description: <<END
+Path to the new vocab file.
+END
+ }
+ in_arg {
+ name: "old_vocab_file"
+ description: <<END
+Path to the old vocab file.
+END
+ }
+ out_arg {
+ name: "remapping"
+ description: <<END
+A Tensor of length num_new_vocab where the element at index i
+is equal to the old ID that maps to the new ID i. This element is -1 for any
+new ID that is not found in the old vocabulary.
+END
+ }
+ out_arg {
+ name: "num_present"
+ description: <<END
+Number of new vocab entries found in old vocab.
+END
+ }
+ attr {
+ name: "new_vocab_offset"
+ description: <<END
+How many entries into the new vocab file to start reading.
+END
+ }
+ attr {
+ name: "num_new_vocab"
+ description: <<END
+Number of entries in the new vocab file to remap.
+END
+ }
+ summary: "Given a path to new and old vocabulary files, returns a remapping Tensor of"
+ description: <<END
+length `num_new_vocab`, where `remapping[i]` contains the row number in the old
+vocabulary that corresponds to row `i` in the new vocabulary (starting at line
+`new_vocab_offset` and up to `num_new_vocab` entities), or `-1` if entry `i`
+in the new vocabulary is not in the old vocabulary. `new_vocab_offset` enables
+use in the partitioned-variable case, and should generally be set by examining
+partitioning info. Both files should be text files, with each line containing
+a single entity within the vocabulary.
+
+For example, with `new_vocab_file` a text file containing each of the following
+elements on a single line: `[f0, f1, f2, f3]`, `old_vocab_file` containing
+`[f1, f0, f3]`, and `num_new_vocab = 3, new_vocab_offset = 1`, the returned
+remapping would be `[0, -1, 2]`.
+
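+A pure-Python sketch that mirrors this example (an illustration of the
+semantics, not the kernel implementation):
+
+```python
+old_vocab = ['f1', 'f0', 'f3']
+new_vocab = ['f0', 'f1', 'f2', 'f3']
+new_vocab_offset, num_new_vocab = 1, 3
+old_index = {entity: i for i, entity in enumerate(old_vocab)}
+window = new_vocab[new_vocab_offset:new_vocab_offset + num_new_vocab]
+remapping = [old_index.get(entity, -1) for entity in window]
+num_present = sum(r != -1 for r in remapping)
+# remapping == [0, -1, 2], num_present == 2
+```
+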
+The op also returns a count of how many entries in the new vocabulary
+were present in the old vocabulary, which is used to calculate the number of
+values to initialize in a weight matrix remapping.
+
+This functionality can be used to remap both row vocabularies (typically,
+features) and column vocabularies (typically, classes) from TensorFlow
+checkpoints. Note that the partitioning logic relies on contiguous vocabularies
+corresponding to div-partitioned variables. Moreover, the underlying remapping
+uses an IndexTable (as opposed to an inexact CuckooTable), so client code should
+use the corresponding index_table_from_file() as the FeatureColumn framework
+does (as opposed to tf.feature_to_id(), which uses a CuckooTable).
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_GetSessionHandle.pbtxt b/tensorflow/core/api_def/base_api/api_def_GetSessionHandle.pbtxt
new file mode 100644
index 0000000000..243712c853
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_GetSessionHandle.pbtxt
@@ -0,0 +1,17 @@
+op {
+ graph_op_name: "GetSessionHandle"
+ in_arg {
+ name: "value"
+ description: <<END
+The tensor to be stored.
+END
+ }
+ out_arg {
+ name: "handle"
+ description: <<END
+The handle for the tensor stored in the session state, represented
+as a string.
+END
+ }
+ summary: "Store the input tensor in the state of the current session."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_GetSessionHandleV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_GetSessionHandleV2.pbtxt
new file mode 100644
index 0000000000..63cdc053c7
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_GetSessionHandleV2.pbtxt
@@ -0,0 +1,17 @@
+op {
+ graph_op_name: "GetSessionHandleV2"
+ in_arg {
+ name: "value"
+ description: <<END
+The tensor to be stored.
+END
+ }
+ out_arg {
+ name: "handle"
+ description: <<END
+The handle for the tensor stored in the session state, represented
+as a ResourceHandle object.
+END
+ }
+ summary: "Store the input tensor in the state of the current session."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_GetSessionTensor.pbtxt b/tensorflow/core/api_def/base_api/api_def_GetSessionTensor.pbtxt
new file mode 100644
index 0000000000..89bd3efe22
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_GetSessionTensor.pbtxt
@@ -0,0 +1,22 @@
+op {
+ graph_op_name: "GetSessionTensor"
+ in_arg {
+ name: "handle"
+ description: <<END
+The handle for a tensor stored in the session state.
+END
+ }
+ out_arg {
+ name: "value"
+ description: <<END
+The tensor for the given handle.
+END
+ }
+ attr {
+ name: "dtype"
+ description: <<END
+The type of the output value.
+END
+ }
+ summary: "Get the value of the tensor specified by its handle."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Greater.pbtxt b/tensorflow/core/api_def/base_api/api_def_Greater.pbtxt
new file mode 100644
index 0000000000..4a4e2f2edd
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Greater.pbtxt
@@ -0,0 +1,8 @@
+op {
+ graph_op_name: "Greater"
+ summary: "Returns the truth value of (x > y) element-wise."
+ description: <<END
+*NOTE*: `Greater` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_GreaterEqual.pbtxt b/tensorflow/core/api_def/base_api/api_def_GreaterEqual.pbtxt
new file mode 100644
index 0000000000..dc947f0488
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_GreaterEqual.pbtxt
@@ -0,0 +1,8 @@
+op {
+ graph_op_name: "GreaterEqual"
+ summary: "Returns the truth value of (x >= y) element-wise."
+ description: <<END
+*NOTE*: `GreaterEqual` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_GroupByWindowDataset.pbtxt b/tensorflow/core/api_def/base_api/api_def_GroupByWindowDataset.pbtxt
new file mode 100644
index 0000000000..ea6bcd4695
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_GroupByWindowDataset.pbtxt
@@ -0,0 +1,14 @@
+op {
+ graph_op_name: "GroupByWindowDataset"
+ attr {
+ name: "key_func"
+ description: <<END
+A function mapping an element of `input_dataset`, concatenated
+with `key_func_other_arguments`, to a scalar value of type DT_INT64.
+END
+ }
+ summary: "Creates a dataset that computes a windowed group-by on `input_dataset`."
+ description: <<END
+// TODO(mrry): Support non-int64 keys.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_H.pbtxt b/tensorflow/core/api_def/base_api/api_def_H.pbtxt
deleted file mode 100644
index 71282e7def..0000000000
--- a/tensorflow/core/api_def/base_api/api_def_H.pbtxt
+++ /dev/null
@@ -1,52 +0,0 @@
-op {
- graph_op_name: "HSVToRGB"
- endpoint {
- name: "HSVToRGB"
- }
- summary: "Convert one or more images from HSV to RGB."
- description: <<END
-Outputs a tensor of the same shape as the `images` tensor, containing the RGB
-value of the pixels. The output is only well defined if the value in `images`
-are in `[0,1]`.
-
-See `rgb_to_hsv` for a description of the HSV encoding.
-END
-}
-op {
- graph_op_name: "HashTable"
- endpoint {
- name: "HashTable"
- }
- summary: "Creates a non-initialized hash table."
- description: <<END
-This op creates a hash table, specifying the type of its keys and values.
-Before using the table you will have to initialize it. After initialization the
-table will be immutable.
-END
-}
-op {
- graph_op_name: "HashTableV2"
- endpoint {
- name: "HashTableV2"
- }
- summary: "Creates a non-initialized hash table."
- description: <<END
-This op creates a hash table, specifying the type of its keys and values.
-Before using the table you will have to initialize it. After initialization the
-table will be immutable.
-END
-}
-op {
- graph_op_name: "HistogramSummary"
- endpoint {
- name: "HistogramSummary"
- }
- summary: "Outputs a `Summary` protocol buffer with a histogram."
- description: <<END
-The generated
-[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
-has one summary value containing a histogram for `values`.
-
-This op reports an `InvalidArgument` error if any value is not finite.
-END
-}
diff --git a/tensorflow/core/api_def/base_api/api_def_HSVToRGB.pbtxt b/tensorflow/core/api_def/base_api/api_def_HSVToRGB.pbtxt
new file mode 100644
index 0000000000..5b23ef3c41
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_HSVToRGB.pbtxt
@@ -0,0 +1,23 @@
+op {
+ graph_op_name: "HSVToRGB"
+ in_arg {
+ name: "images"
+ description: <<END
+1-D or higher rank. HSV data to convert. Last dimension must be size 3.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+`images` converted to RGB.
+END
+ }
+ summary: "Convert one or more images from HSV to RGB."
+ description: <<END
+Outputs a tensor of the same shape as the `images` tensor, containing the RGB
+value of the pixels. The output is only well defined if the value in `images`
+are in `[0,1]`.
+
+See `rgb_to_hsv` for a description of the HSV encoding.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_HashTable.pbtxt b/tensorflow/core/api_def/base_api/api_def_HashTable.pbtxt
new file mode 100644
index 0000000000..bb20232a89
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_HashTable.pbtxt
@@ -0,0 +1,49 @@
+op {
+ graph_op_name: "HashTable"
+ visibility: SKIP
+ out_arg {
+ name: "table_handle"
+ description: <<END
+Handle to a table.
+END
+ }
+ attr {
+ name: "container"
+ description: <<END
+If non-empty, this table is placed in the given container.
+Otherwise, a default container is used.
+END
+ }
+ attr {
+ name: "shared_name"
+ description: <<END
+If non-empty, this table is shared under the given name across
+multiple sessions.
+END
+ }
+ attr {
+ name: "use_node_name_sharing"
+ description: <<END
+If true and shared_name is empty, the table is shared
+using the node name.
+END
+ }
+ attr {
+ name: "key_dtype"
+ description: <<END
+Type of the table keys.
+END
+ }
+ attr {
+ name: "value_dtype"
+ description: <<END
+Type of the table values.
+END
+ }
+ summary: "Creates a non-initialized hash table."
+ description: <<END
+This op creates a hash table, specifying the type of its keys and values.
+Before using the table you will have to initialize it. After initialization the
+table will be immutable.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_HashTableV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_HashTableV2.pbtxt
new file mode 100644
index 0000000000..eddd4e256c
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_HashTableV2.pbtxt
@@ -0,0 +1,51 @@
+op {
+ graph_op_name: "HashTableV2"
+ endpoint {
+ name: "HashTable"
+ }
+ out_arg {
+ name: "table_handle"
+ description: <<END
+Handle to a table.
+END
+ }
+ attr {
+ name: "container"
+ description: <<END
+If non-empty, this table is placed in the given container.
+Otherwise, a default container is used.
+END
+ }
+ attr {
+ name: "shared_name"
+ description: <<END
+If non-empty, this table is shared under the given name across
+multiple sessions.
+END
+ }
+ attr {
+ name: "use_node_name_sharing"
+ description: <<END
+If true and shared_name is empty, the table is shared
+using the node name.
+END
+ }
+ attr {
+ name: "key_dtype"
+ description: <<END
+Type of the table keys.
+END
+ }
+ attr {
+ name: "value_dtype"
+ description: <<END
+Type of the table values.
+END
+ }
+ summary: "Creates a non-initialized hash table."
+ description: <<END
+This op creates a hash table, specifying the type of its keys and values.
+Before using the table you will have to initialize it. After initialization the
+table will be immutable.
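+
+A hedged usage sketch through the `tf.contrib.lookup` Python wrappers (the
+wrapper names are an assumption of this sketch, not part of this op def):
+
+```python
+keys = tf.constant(['a', 'b'])
+values = tf.constant([0, 1], dtype=tf.int64)
+table = tf.contrib.lookup.HashTable(
+    tf.contrib.lookup.KeyValueTensorInitializer(keys, values),
+    default_value=-1)
+out = table.lookup(tf.constant(['a', 'c']))
+with tf.Session() as sess:
+  table.init.run()        # the table must be initialized before use
+  print(sess.run(out))    # ==> [ 0 -1]
+```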
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_HistogramFixedWidth.pbtxt b/tensorflow/core/api_def/base_api/api_def_HistogramFixedWidth.pbtxt
new file mode 100644
index 0000000000..9b7fcd67f1
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_HistogramFixedWidth.pbtxt
@@ -0,0 +1,47 @@
+op {
+ graph_op_name: "HistogramFixedWidth"
+ in_arg {
+ name: "values"
+ description: <<END
+Numeric `Tensor`.
+END
+ }
+ in_arg {
+ name: "value_range"
+ description: <<END
+Shape [2] `Tensor` of same `dtype` as `values`.
+`values <= value_range[0]` will be mapped to `hist[0]`;
+`values >= value_range[1]` will be mapped to `hist[-1]`.
+END
+ }
+ in_arg {
+ name: "nbins"
+ description: <<END
+Scalar `int32 Tensor`. Number of histogram bins.
+END
+ }
+ out_arg {
+ name: "out"
+ description: <<END
+A 1-D `Tensor` holding histogram of values.
+END
+ }
+ summary: "Return histogram of values."
+ description: <<END
+Given the tensor `values`, this operation returns a rank 1 histogram counting
+the number of entries in `values` that fall into every bin. The bins are
+equal width and determined by the arguments `value_range` and `nbins`.
+
+```python
+# Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
+nbins = 5
+value_range = [0.0, 5.0]
+new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]
+
+hist = tf.histogram_fixed_width(new_values, value_range, nbins=5)
+with tf.Session() as sess:
+  print(sess.run(hist))  # ==> [2, 1, 1, 0, 2]
+```
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_HistogramSummary.pbtxt b/tensorflow/core/api_def/base_api/api_def_HistogramSummary.pbtxt
new file mode 100644
index 0000000000..faf1ed5abd
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_HistogramSummary.pbtxt
@@ -0,0 +1,29 @@
+op {
+ graph_op_name: "HistogramSummary"
+ in_arg {
+ name: "tag"
+ description: <<END
+Scalar. Tag to use for the `Summary.Value`.
+END
+ }
+ in_arg {
+ name: "values"
+ description: <<END
+Any shape. Values to use to build the histogram.
+END
+ }
+ out_arg {
+ name: "summary"
+ description: <<END
+Scalar. Serialized `Summary` protocol buffer.
+END
+ }
+ summary: "Outputs a `Summary` protocol buffer with a histogram."
+ description: <<END
+The generated
+[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
+has one summary value containing a histogram for `values`.
+
+This op reports an `InvalidArgument` error if any value is not finite.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_I.pbtxt b/tensorflow/core/api_def/base_api/api_def_I.pbtxt
deleted file mode 100644
index caaf93bf88..0000000000
--- a/tensorflow/core/api_def/base_api/api_def_I.pbtxt
+++ /dev/null
@@ -1,518 +0,0 @@
-op {
- graph_op_name: "IFFT"
- endpoint {
- name: "IFFT"
- }
- summary: "Inverse fast Fourier transform."
- description: <<END
-Computes the inverse 1-dimensional discrete Fourier transform over the
-inner-most dimension of `input`.
-END
-}
-op {
- graph_op_name: "IFFT2D"
- endpoint {
- name: "IFFT2D"
- }
- summary: "Inverse 2D fast Fourier transform."
- description: <<END
-Computes the inverse 2-dimensional discrete Fourier transform over the
-inner-most 2 dimensions of `input`.
-END
-}
-op {
- graph_op_name: "IFFT3D"
- endpoint {
- name: "IFFT3D"
- }
- summary: "Inverse 3D fast Fourier transform."
- description: <<END
-Computes the inverse 3-dimensional discrete Fourier transform over the
-inner-most 3 dimensions of `input`.
-END
-}
-op {
- graph_op_name: "IRFFT"
- endpoint {
- name: "IRFFT"
- }
- summary: "Inverse real-valued fast Fourier transform."
- description: <<END
-Computes the inverse 1-dimensional discrete Fourier transform of a real-valued
-signal over the inner-most dimension of `input`.
-
-The inner-most dimension of `input` is assumed to be the result of `RFFT`: the
-`fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If
-`fft_length` is not provided, it is computed from the size of the inner-most
-dimension of `input` (`fft_length = 2 * (inner - 1)`). If the FFT length used to
-compute `input` is odd, it should be provided since it cannot be inferred
-properly.
-
-Along the axis `IRFFT` is computed on, if `fft_length / 2 + 1` is smaller
-than the corresponding dimension of `input`, the dimension is cropped. If it is
-larger, the dimension is padded with zeros.
-END
-}
-op {
- graph_op_name: "IRFFT2D"
- endpoint {
- name: "IRFFT2D"
- }
- summary: "Inverse 2D real-valued fast Fourier transform."
- description: <<END
-Computes the inverse 2-dimensional discrete Fourier transform of a real-valued
-signal over the inner-most 2 dimensions of `input`.
-
-The inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`:
-The inner-most dimension contains the `fft_length / 2 + 1` unique components of
-the DFT of a real-valued signal. If `fft_length` is not provided, it is computed
-from the size of the inner-most 2 dimensions of `input`. If the FFT length used
-to compute `input` is odd, it should be provided since it cannot be inferred
-properly.
-
-Along each axis `IRFFT2D` is computed on, if `fft_length` (or
-`fft_length / 2 + 1` for the inner-most dimension) is smaller than the
-corresponding dimension of `input`, the dimension is cropped. If it is larger,
-the dimension is padded with zeros.
-END
-}
-op {
- graph_op_name: "IRFFT3D"
- endpoint {
- name: "IRFFT3D"
- }
- summary: "Inverse 3D real-valued fast Fourier transform."
- description: <<END
-Computes the inverse 3-dimensional discrete Fourier transform of a real-valued
-signal over the inner-most 3 dimensions of `input`.
-
-The inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`:
-The inner-most dimension contains the `fft_length / 2 + 1` unique components of
-the DFT of a real-valued signal. If `fft_length` is not provided, it is computed
-from the size of the inner-most 3 dimensions of `input`. If the FFT length used
-to compute `input` is odd, it should be provided since it cannot be inferred
-properly.
-
-Along each axis `IRFFT3D` is computed on, if `fft_length` (or
-`fft_length / 2 + 1` for the inner-most dimension) is smaller than the
-corresponding dimension of `input`, the dimension is cropped. If it is larger,
-the dimension is padded with zeros.
-END
-}
-op {
- graph_op_name: "Identity"
- endpoint {
- name: "Identity"
- }
- summary: "Return a tensor with the same shape and contents as the input tensor or value."
-}
-op {
- graph_op_name: "IdentityN"
- endpoint {
- name: "IdentityN"
- }
- summary: "Returns a list of tensors with the same shapes and contents as the input"
- description: <<END
-tensors.
-
-This op can be used to override the gradient for complicated functions. For
-example, suppose y = f(x) and we wish to apply a custom function g for backprop
-such that dx = g(dy). In Python,
-
-```python
-with tf.get_default_graph().gradient_override_map(
- {'IdentityN': 'OverrideGradientWithG'}):
- y, _ = identity_n([f(x), x])
-
-@tf.RegisterGradient('OverrideGradientWithG')
-def ApplyG(op, dy, _):
- return [None, g(dy)] # Do not backprop to f(x).
-```
-END
-}
-op {
- graph_op_name: "IdentityReader"
- endpoint {
- name: "IdentityReader"
- }
- summary: "A Reader that outputs the queued work as both the key and value."
- description: <<END
-To use, enqueue strings in a Queue. ReaderRead will take the front
-work string and output (work, work).
-END
-}
-op {
- graph_op_name: "IdentityReaderV2"
- endpoint {
- name: "IdentityReaderV2"
- }
- summary: "A Reader that outputs the queued work as both the key and value."
- description: <<END
-To use, enqueue strings in a Queue. ReaderRead will take the front
-work string and output (work, work).
-END
-}
-op {
- graph_op_name: "Igamma"
- endpoint {
- name: "Igamma"
- }
- summary: "Compute the lower regularized incomplete Gamma function `Q(a, x)`."
- description: <<END
-The lower regularized incomplete Gamma function is defined as:
-
-
-\\(P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)\\)
-
-where
-
-\\(gamma(a, x) = int_{0}^{x} t^{a-1} exp(-t) dt\\)
-
-is the lower incomplete Gamma function.
-
-Note, above `Q(a, x)` (`Igammac`) is the upper regularized complete
-Gamma function.
-END
-}
-op {
- graph_op_name: "Igammac"
- endpoint {
- name: "Igammac"
- }
- summary: "Compute the upper regularized incomplete Gamma function `Q(a, x)`."
- description: <<END
-The upper regularized incomplete Gamma function is defined as:
-
-\\(Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)\\)
-
-where
-
-\\(Gamma(a, x) = int_{x}^{\infty} t^{a-1} exp(-t) dt\\)
-
-is the upper incomplete Gama function.
-
-Note, above `P(a, x)` (`Igamma`) is the lower regularized complete
-Gamma function.
-END
-}
-op {
- graph_op_name: "IgnoreErrorsDataset"
- endpoint {
- name: "IgnoreErrorsDataset"
- }
- summary: "Creates a dataset that contains the elements of `input_dataset` ignoring errors."
-}
-op {
- graph_op_name: "Imag"
- endpoint {
- name: "Imag"
- }
- summary: "Returns the imaginary part of a complex number."
- description: <<END
-Given a tensor `input` of complex numbers, this operation returns a tensor of
-type `float` that is the imaginary part of each element in `input`. All
-elements in `input` must be complex numbers of the form \\(a + bj\\), where *a*
-is the real part and *b* is the imaginary part returned by this operation.
-
-For example:
-
-```
-# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
-tf.imag(input) ==> [4.75, 5.75]
-```
-END
-}
-op {
- graph_op_name: "ImageSummary"
- endpoint {
- name: "ImageSummary"
- }
- summary: "Outputs a `Summary` protocol buffer with images."
- description: <<END
-The summary has up to `max_images` summary values containing images. The
-images are built from `tensor` which must be 4-D with shape `[batch_size,
-height, width, channels]` and where `channels` can be:
-
-* 1: `tensor` is interpreted as Grayscale.
-* 3: `tensor` is interpreted as RGB.
-* 4: `tensor` is interpreted as RGBA.
-
-The images have the same number of channels as the input tensor. For float
-input, the values are normalized one image at a time to fit in the range
-`[0, 255]`. `uint8` values are unchanged. The op uses two different
-normalization algorithms:
-
-* If the input values are all positive, they are rescaled so the largest one
- is 255.
-
-* If any input value is negative, the values are shifted so input value 0.0
- is at 127. They are then rescaled so that either the smallest value is 0,
- or the largest one is 255.
-
-The `tag` argument is a scalar `Tensor` of type `string`. It is used to
-build the `tag` of the summary values:
-
-* If `max_images` is 1, the summary value tag is '*tag*/image'.
-* If `max_images` is greater than 1, the summary value tags are
- generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.
-
-The `bad_color` argument is the color to use in the generated images for
-non-finite input values. It is a `unit8` 1-D tensor of length `channels`.
-Each element must be in the range `[0, 255]` (It represents the value of a
-pixel in the output image). Non-finite values in the input tensor are
-replaced by this tensor in the output image. The default value is the color
-red.
-END
-}
-op {
- graph_op_name: "ImmutableConst"
- endpoint {
- name: "ImmutableConst"
- }
- summary: "Returns immutable tensor from memory region."
- description: <<END
-The current implementation memmaps the tensor from a file.
-END
-}
-op {
- graph_op_name: "InTopK"
- endpoint {
- name: "InTopK"
- }
- summary: "Says whether the targets are in the top `K` predictions."
- description: <<END
-This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the
-prediction for the target class is among the top `k` predictions among
-all predictions for example `i`. Note that the behavior of `InTopK` differs
-from the `TopK` op in its handling of ties; if multiple classes have the
-same prediction value and straddle the top-`k` boundary, all of those
-classes are considered to be in the top `k`.
-
-More formally, let
-
- \\(predictions_i\\) be the predictions for all classes for example `i`,
- \\(targets_i\\) be the target class for example `i`,
- \\(out_i\\) be the output for example `i`,
-
-$$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
-END
-}
-op {
- graph_op_name: "InTopKV2"
- endpoint {
- name: "InTopKV2"
- }
- summary: "Says whether the targets are in the top `K` predictions."
- description: <<END
-This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the
-prediction for the target class is among the top `k` predictions among
-all predictions for example `i`. Note that the behavior of `InTopK` differs
-from the `TopK` op in its handling of ties; if multiple classes have the
-same prediction value and straddle the top-`k` boundary, all of those
-classes are considered to be in the top `k`.
-
-More formally, let
-
- \\(predictions_i\\) be the predictions for all classes for example `i`,
- \\(targets_i\\) be the target class for example `i`,
- \\(out_i\\) be the output for example `i`,
-
-$$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
-END
-}
-op {
- graph_op_name: "InitializeTable"
- endpoint {
- name: "InitializeTable"
- }
- summary: "Table initializer that takes two tensors for keys and values respectively."
-}
-op {
- graph_op_name: "InitializeTableFromTextFile"
- endpoint {
- name: "InitializeTableFromTextFile"
- }
- summary: "Initializes a table from a text file."
- description: <<END
-It inserts one key-value pair into the table for each line of the file.
-The key and value is extracted from the whole line content, elements from the
-split line based on `delimiter` or the line number (starting from zero).
-Where to extract the key and value from a line is specified by `key_index` and
-`value_index`.
-
-- A value of -1 means use the line number(starting from zero), expects `int64`.
-- A value of -2 means use the whole line content, expects `string`.
-- A value >= 0 means use the index (starting at zero) of the split line based
- on `delimiter`.
-END
-}
-op {
- graph_op_name: "InitializeTableFromTextFileV2"
- endpoint {
- name: "InitializeTableFromTextFileV2"
- }
- summary: "Initializes a table from a text file."
- description: <<END
-It inserts one key-value pair into the table for each line of the file.
-The key and value is extracted from the whole line content, elements from the
-split line based on `delimiter` or the line number (starting from zero).
-Where to extract the key and value from a line is specified by `key_index` and
-`value_index`.
-
-- A value of -1 means use the line number(starting from zero), expects `int64`.
-- A value of -2 means use the whole line content, expects `string`.
-- A value >= 0 means use the index (starting at zero) of the split line based
- on `delimiter`.
-END
-}
-op {
- graph_op_name: "InitializeTableV2"
- endpoint {
- name: "InitializeTableV2"
- }
- summary: "Table initializer that takes two tensors for keys and values respectively."
-}
-op {
- graph_op_name: "InterleaveDataset"
- endpoint {
- name: "InterleaveDataset"
- }
- summary: "Creates a dataset that applies `f` to the outputs of `input_dataset`."
- description: <<END
-Unlike MapDataset, the `f` in InterleaveDataset is expected to return
-a Dataset variant, and InterleaveDataset will flatten successive
-results into a single Dataset. Unlike FlatMapDataset,
-InterleaveDataset will interleave sequences of up to `block_length`
-consecutive elements from `cycle_length` input elements.
-END
-}
-op {
- graph_op_name: "Inv"
- endpoint {
- name: "Inv"
- }
- summary: "Computes the reciprocal of x element-wise."
- description: <<END
-I.e., \\(y = 1 / x\\).
-END
-}
-op {
- graph_op_name: "InvGrad"
- endpoint {
- name: "InvGrad"
- }
- summary: "Computes the gradient for the inverse of `x` wrt its input."
- description: <<END
-Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy`
-is the corresponding input gradient.
-END
-}
-op {
- graph_op_name: "Invert"
- endpoint {
- name: "Invert"
- }
- summary: "Flips all bits elementwise."
- description: <<END
-The result will have exactly those bits set, that are not set in `x`. The
-computation is performed on the underlying representation of x.
-END
-}
-op {
- graph_op_name: "InvertPermutation"
- endpoint {
- name: "InvertPermutation"
- }
- summary: "Computes the inverse permutation of a tensor."
- description: <<END
-This operation computes the inverse of an index permutation. It takes a 1-D
-integer tensor `x`, which represents the indices of a zero-based array, and
-swaps each value with its index position. In other words, for an output tensor
-`y` and an input tensor `x`, this operation computes the following:
-
-`y[x[i]] = i for i in [0, 1, ..., len(x) - 1]`
-
-The values must include 0. There can be no duplicate values or negative values.
-
-For example:
-
-```
-# tensor `x` is [3, 4, 0, 2, 1]
-invert_permutation(x) ==> [2, 4, 3, 0, 1]
-```
-END
-}
-op {
- graph_op_name: "IsFinite"
- endpoint {
- name: "IsFinite"
- }
- summary: "Returns which elements of x are finite."
- description: <<END
-@compatibility(numpy)
-Equivalent to np.isfinite
-@end_compatibility
-END
-}
-op {
- graph_op_name: "IsInf"
- endpoint {
- name: "IsInf"
- }
- summary: "Returns which elements of x are Inf."
- description: <<END
-@compatibility(numpy)
-Equivalent to np.isinf
-@end_compatibility
-END
-}
-op {
- graph_op_name: "IsNan"
- endpoint {
- name: "IsNan"
- }
- summary: "Returns which elements of x are NaN."
- description: <<END
-@compatibility(numpy)
-Equivalent to np.isnan
-@end_compatibility
-END
-}
-op {
- graph_op_name: "IsVariableInitialized"
- endpoint {
- name: "IsVariableInitialized"
- }
- summary: "Checks whether a tensor has been initialized."
- description: <<END
-Outputs boolean scalar indicating whether the tensor has been initialized.
-END
-}
-op {
- graph_op_name: "Iterator"
- endpoint {
- name: "Iterator"
- }
- summary: "A container for an iterator resource."
-}
-op {
- graph_op_name: "IteratorFromStringHandle"
- endpoint {
- name: "IteratorFromStringHandle"
- }
- summary: "Converts the given string representing a handle to an iterator to a resource."
-}
-op {
- graph_op_name: "IteratorGetNext"
- endpoint {
- name: "IteratorGetNext"
- }
- summary: "Gets the next output from the given iterator."
-}
-op {
- graph_op_name: "IteratorToStringHandle"
- endpoint {
- name: "IteratorToStringHandle"
- }
- summary: "Converts the given `resource_handle` representing an iterator to a string."
-}
diff --git a/tensorflow/core/api_def/base_api/api_def_IFFT.pbtxt b/tensorflow/core/api_def/base_api/api_def_IFFT.pbtxt
new file mode 100644
index 0000000000..b793c99cf7
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_IFFT.pbtxt
@@ -0,0 +1,25 @@
+op {
+ graph_op_name: "IFFT"
+ in_arg {
+ name: "input"
+ description: <<END
+A complex64 tensor.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+A complex64 tensor of the same shape as `input`. The inner-most
+ dimension of `input` is replaced with its inverse 1D Fourier transform.
+
+@compatibility(numpy)
+Equivalent to np.fft.ifft
+@end_compatibility
+END
+ }
+ summary: "Inverse fast Fourier transform."
+ description: <<END
+Computes the inverse 1-dimensional discrete Fourier transform over the
+inner-most dimension of `input`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_IFFT2D.pbtxt b/tensorflow/core/api_def/base_api/api_def_IFFT2D.pbtxt
new file mode 100644
index 0000000000..7f38f14308
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_IFFT2D.pbtxt
@@ -0,0 +1,25 @@
+op {
+ graph_op_name: "IFFT2D"
+ in_arg {
+ name: "input"
+ description: <<END
+A complex64 tensor.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+A complex64 tensor of the same shape as `input`. The inner-most 2
+ dimensions of `input` are replaced with their inverse 2D Fourier transform.
+
+@compatibility(numpy)
+Equivalent to np.fft.ifft2
+@end_compatibility
+END
+ }
+ summary: "Inverse 2D fast Fourier transform."
+ description: <<END
+Computes the inverse 2-dimensional discrete Fourier transform over the
+inner-most 2 dimensions of `input`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_IFFT3D.pbtxt b/tensorflow/core/api_def/base_api/api_def_IFFT3D.pbtxt
new file mode 100644
index 0000000000..52f1118775
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_IFFT3D.pbtxt
@@ -0,0 +1,25 @@
+op {
+ graph_op_name: "IFFT3D"
+ in_arg {
+ name: "input"
+ description: <<END
+A complex64 tensor.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+A complex64 tensor of the same shape as `input`. The inner-most 3
+ dimensions of `input` are replaced with their inverse 3D Fourier transform.
+
+@compatibility(numpy)
+Equivalent to np.fft.ifftn with 3 dimensions.
+@end_compatibility
+END
+ }
+ summary: "Inverse 3D fast Fourier transform."
+ description: <<END
+Computes the inverse 3-dimensional discrete Fourier transform over the
+inner-most 3 dimensions of `input`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_IRFFT.pbtxt b/tensorflow/core/api_def/base_api/api_def_IRFFT.pbtxt
new file mode 100644
index 0000000000..1e1caa9ead
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_IRFFT.pbtxt
@@ -0,0 +1,43 @@
+op {
+ graph_op_name: "IRFFT"
+ in_arg {
+ name: "input"
+ description: <<END
+A complex64 tensor.
+END
+ }
+ in_arg {
+ name: "fft_length"
+ description: <<END
+An int32 tensor of shape [1]. The FFT length.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+A float32 tensor of the same rank as `input`. The inner-most
+ dimension of `input` is replaced with the `fft_length` samples of its inverse
+ 1D Fourier transform.
+
+@compatibility(numpy)
+Equivalent to np.fft.irfft
+@end_compatibility
+END
+ }
+ summary: "Inverse real-valued fast Fourier transform."
+ description: <<END
+Computes the inverse 1-dimensional discrete Fourier transform of a real-valued
+signal over the inner-most dimension of `input`.
+
+The inner-most dimension of `input` is assumed to be the result of `RFFT`: the
+`fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If
+`fft_length` is not provided, it is computed from the size of the inner-most
+dimension of `input` (`fft_length = 2 * (inner - 1)`). If the FFT length used to
+compute `input` is odd, it should be provided since it cannot be inferred
+properly.
+
+Along the axis `IRFFT` is computed on, if `fft_length / 2 + 1` is smaller
+than the corresponding dimension of `input`, the dimension is cropped. If it is
+larger, the dimension is padded with zeros.
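+
+A small sketch of the default length inference (hypothetical shapes; assuming
+the `tf.spectral.irfft` Python endpoint):
+
+```python
+# The inner-most dimension is 5, so fft_length defaults to
+# 2 * (5 - 1) = 8, matching np.fft.irfft.
+spectrum = tf.complex(tf.ones([5]), tf.zeros([5]))
+tf.spectral.irfft(spectrum)  # float32 tensor of shape [8]
+```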
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_IRFFT2D.pbtxt b/tensorflow/core/api_def/base_api/api_def_IRFFT2D.pbtxt
new file mode 100644
index 0000000000..9b7390a385
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_IRFFT2D.pbtxt
@@ -0,0 +1,44 @@
+op {
+ graph_op_name: "IRFFT2D"
+ in_arg {
+ name: "input"
+ description: <<END
+A complex64 tensor.
+END
+ }
+ in_arg {
+ name: "fft_length"
+ description: <<END
+An int32 tensor of shape [2]. The FFT length for each dimension.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+A float32 tensor of the same rank as `input`. The inner-most 2
+ dimensions of `input` are replaced with the `fft_length` samples of their
+ inverse 2D Fourier transform.
+
+@compatibility(numpy)
+Equivalent to np.fft.irfft2
+@end_compatibility
+END
+ }
+ summary: "Inverse 2D real-valued fast Fourier transform."
+ description: <<END
+Computes the inverse 2-dimensional discrete Fourier transform of a real-valued
+signal over the inner-most 2 dimensions of `input`.
+
+The inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`:
+The inner-most dimension contains the `fft_length / 2 + 1` unique components of
+the DFT of a real-valued signal. If `fft_length` is not provided, it is computed
+from the size of the inner-most 2 dimensions of `input`. If the FFT length used
+to compute `input` is odd, it should be provided since it cannot be inferred
+properly.
+
+Along each axis `IRFFT2D` is computed on, if `fft_length` (or
+`fft_length / 2 + 1` for the inner-most dimension) is smaller than the
+corresponding dimension of `input`, the dimension is cropped. If it is larger,
+the dimension is padded with zeros.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_IRFFT3D.pbtxt b/tensorflow/core/api_def/base_api/api_def_IRFFT3D.pbtxt
new file mode 100644
index 0000000000..1cee2ceeff
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_IRFFT3D.pbtxt
@@ -0,0 +1,44 @@
+op {
+ graph_op_name: "IRFFT3D"
+ in_arg {
+ name: "input"
+ description: <<END
+A complex64 tensor.
+END
+ }
+ in_arg {
+ name: "fft_length"
+ description: <<END
+An int32 tensor of shape [3]. The FFT length for each dimension.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+A float32 tensor of the same rank as `input`. The inner-most 3
+ dimensions of `input` are replaced with the `fft_length` samples of their
+ inverse 3D real Fourier transform.
+
+@compatibility(numpy)
+Equivalent to np.fft.irfftn with 3 dimensions.
+@end_compatibility
+END
+ }
+ summary: "Inverse 3D real-valued fast Fourier transform."
+ description: <<END
+Computes the inverse 3-dimensional discrete Fourier transform of a real-valued
+signal over the inner-most 3 dimensions of `input`.
+
+The inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`:
+The inner-most dimension contains the `fft_length / 2 + 1` unique components of
+the DFT of a real-valued signal. If `fft_length` is not provided, it is computed
+from the size of the inner-most 3 dimensions of `input`. If the FFT length used
+to compute `input` is odd, it should be provided since it cannot be inferred
+properly.
+
+Along each axis `IRFFT3D` is computed on, if `fft_length` (or
+`fft_length / 2 + 1` for the inner-most dimension) is smaller than the
+corresponding dimension of `input`, the dimension is cropped. If it is larger,
+the dimension is padded with zeros.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Identity.pbtxt b/tensorflow/core/api_def/base_api/api_def_Identity.pbtxt
new file mode 100644
index 0000000000..a2eb82e890
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Identity.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Identity"
+ summary: "Return a tensor with the same shape and contents as the input tensor or value."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_IdentityN.pbtxt b/tensorflow/core/api_def/base_api/api_def_IdentityN.pbtxt
new file mode 100644
index 0000000000..45c213bce1
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_IdentityN.pbtxt
@@ -0,0 +1,21 @@
+op {
+ graph_op_name: "IdentityN"
+ summary: "Returns a list of tensors with the same shapes and contents as the input"
+ description: <<END
+tensors.
+
+This op can be used to override the gradient for complicated functions. For
+example, suppose y = f(x) and we wish to apply a custom function g for backprop
+such that dx = g(dy). In Python,
+
+```python
+with tf.get_default_graph().gradient_override_map(
+ {'IdentityN': 'OverrideGradientWithG'}):
+ y, _ = identity_n([f(x), x])
+
+@tf.RegisterGradient('OverrideGradientWithG')
+def ApplyG(op, dy, _):
+ return [None, g(dy)] # Do not backprop to f(x).
+```
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_IdentityReader.pbtxt b/tensorflow/core/api_def/base_api/api_def_IdentityReader.pbtxt
new file mode 100644
index 0000000000..9747d5c18c
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_IdentityReader.pbtxt
@@ -0,0 +1,29 @@
+op {
+ graph_op_name: "IdentityReader"
+ visibility: SKIP
+ out_arg {
+ name: "reader_handle"
+ description: <<END
+The handle to reference the Reader.
+END
+ }
+ attr {
+ name: "container"
+ description: <<END
+If non-empty, this reader is placed in the given container.
+Otherwise, a default container is used.
+END
+ }
+ attr {
+ name: "shared_name"
+ description: <<END
+If non-empty, this reader is named in the given bucket
+with this shared_name. Otherwise, the node name is used instead.
+END
+ }
+ summary: "A Reader that outputs the queued work as both the key and value."
+ description: <<END
+To use, enqueue strings in a Queue. ReaderRead will take the front
+work string and output (work, work).
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_IdentityReaderV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_IdentityReaderV2.pbtxt
new file mode 100644
index 0000000000..71ef011599
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_IdentityReaderV2.pbtxt
@@ -0,0 +1,31 @@
+op {
+ graph_op_name: "IdentityReaderV2"
+ endpoint {
+ name: "IdentityReader"
+ }
+ out_arg {
+ name: "reader_handle"
+ description: <<END
+The handle to reference the Reader.
+END
+ }
+ attr {
+ name: "container"
+ description: <<END
+If non-empty, this reader is placed in the given container.
+Otherwise, a default container is used.
+END
+ }
+ attr {
+ name: "shared_name"
+ description: <<END
+If non-empty, this reader is named in the given bucket
+with this shared_name. Otherwise, the node name is used instead.
+END
+ }
+ summary: "A Reader that outputs the queued work as both the key and value."
+ description: <<END
+To use, enqueue strings in a Queue. ReaderRead will take the front
+work string and output (work, work).
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Igamma.pbtxt b/tensorflow/core/api_def/base_api/api_def_Igamma.pbtxt
new file mode 100644
index 0000000000..e7bc5ddae2
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Igamma.pbtxt
@@ -0,0 +1,19 @@
+op {
+ graph_op_name: "Igamma"
+  summary: "Compute the lower regularized incomplete Gamma function `P(a, x)`."
+ description: <<END
+The lower regularized incomplete Gamma function is defined as:
+
+
+\\(P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)\\)
+
+where
+
+\\(gamma(a, x) = int_{0}^{x} t^{a-1} exp(-t) dt\\)
+
+is the lower incomplete Gamma function.
+
+Note that, above, `Q(a, x)` (`Igammac`) is the upper regularized incomplete
+Gamma function.
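+
+As a numerical cross-check (assuming SciPy is available; its
+`scipy.special.gammainc` computes the same regularized lower function):
+
+```python
+from scipy import special
+special.gammainc(2.0, 3.0)  # ==> ~0.8009, i.e. 1 - 4 * exp(-3)
+```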
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Igammac.pbtxt b/tensorflow/core/api_def/base_api/api_def_Igammac.pbtxt
new file mode 100644
index 0000000000..12f8416774
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Igammac.pbtxt
@@ -0,0 +1,18 @@
+op {
+ graph_op_name: "Igammac"
+ summary: "Compute the upper regularized incomplete Gamma function `Q(a, x)`."
+ description: <<END
+The upper regularized incomplete Gamma function is defined as:
+
+\\(Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)\\)
+
+where
+
+\\(Gamma(a, x) = int_{x}^{\infty} t^{a-1} exp(-t) dt\\)
+
+is the upper incomplete Gamma function.
+
+Note that, above, `P(a, x)` (`Igamma`) is the lower regularized incomplete
+Gamma function.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_IgnoreErrorsDataset.pbtxt b/tensorflow/core/api_def/base_api/api_def_IgnoreErrorsDataset.pbtxt
new file mode 100644
index 0000000000..e492d90287
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_IgnoreErrorsDataset.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "IgnoreErrorsDataset"
+ summary: "Creates a dataset that contains the elements of `input_dataset` ignoring errors."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Imag.pbtxt b/tensorflow/core/api_def/base_api/api_def_Imag.pbtxt
new file mode 100644
index 0000000000..8c3bb67431
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Imag.pbtxt
@@ -0,0 +1,17 @@
+op {
+ graph_op_name: "Imag"
+ summary: "Returns the imaginary part of a complex number."
+ description: <<END
+Given a tensor `input` of complex numbers, this operation returns a tensor of
+type `float` that is the imaginary part of each element in `input`. All
+elements in `input` must be complex numbers of the form \\(a + bj\\), where *a*
+is the real part and *b* is the imaginary part returned by this operation.
+
+For example:
+
+```
+# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
+tf.imag(input) ==> [4.75, 5.75]
+```
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ImageSummary.pbtxt b/tensorflow/core/api_def/base_api/api_def_ImageSummary.pbtxt
new file mode 100644
index 0000000000..9b00f5b19d
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ImageSummary.pbtxt
@@ -0,0 +1,70 @@
+op {
+ graph_op_name: "ImageSummary"
+ in_arg {
+ name: "tag"
+ description: <<END
+Scalar. Used to build the `tag` attribute of the summary values.
+END
+ }
+ in_arg {
+ name: "tensor"
+ description: <<END
+4-D of shape `[batch_size, height, width, channels]` where
+`channels` is 1, 3, or 4.
+END
+ }
+ out_arg {
+ name: "summary"
+ description: <<END
+Scalar. Serialized `Summary` protocol buffer.
+END
+ }
+ attr {
+ name: "max_images"
+ description: <<END
+Max number of batch elements to generate images for.
+END
+ }
+ attr {
+ name: "bad_color"
+ description: <<END
+Color to use for pixels with non-finite values.
+END
+ }
+ summary: "Outputs a `Summary` protocol buffer with images."
+ description: <<END
+The summary has up to `max_images` summary values containing images. The
+images are built from `tensor` which must be 4-D with shape `[batch_size,
+height, width, channels]` and where `channels` can be:
+
+* 1: `tensor` is interpreted as Grayscale.
+* 3: `tensor` is interpreted as RGB.
+* 4: `tensor` is interpreted as RGBA.
+
+The images have the same number of channels as the input tensor. For float
+input, the values are normalized one image at a time to fit in the range
+`[0, 255]`. `uint8` values are unchanged. The op uses two different
+normalization algorithms:
+
+* If the input values are all positive, they are rescaled so the largest one
+ is 255.
+
+* If any input value is negative, the values are shifted so input value 0.0
+ is at 127. They are then rescaled so that either the smallest value is 0,
+ or the largest one is 255.
+
+The `tag` argument is a scalar `Tensor` of type `string`. It is used to
+build the `tag` of the summary values:
+
+* If `max_images` is 1, the summary value tag is '*tag*/image'.
+* If `max_images` is greater than 1, the summary value tags are
+ generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.
+
+The `bad_color` argument is the color to use in the generated images for
+non-finite input values. It is a `uint8` 1-D tensor of length `channels`.
+Each element must be in the range `[0, 255]` (it represents the value of a
+pixel in the output image). Non-finite values in the input tensor are
+replaced by this tensor in the output image. The default value is the color
+red.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ImmutableConst.pbtxt b/tensorflow/core/api_def/base_api/api_def_ImmutableConst.pbtxt
new file mode 100644
index 0000000000..658629df38
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ImmutableConst.pbtxt
@@ -0,0 +1,26 @@
+op {
+ graph_op_name: "ImmutableConst"
+ attr {
+ name: "dtype"
+ description: <<END
+Type of the returned tensor.
+END
+ }
+ attr {
+ name: "shape"
+ description: <<END
+Shape of the returned tensor.
+END
+ }
+ attr {
+ name: "memory_region_name"
+ description: <<END
+Name of the read-only memory region used by the tensor; see
+NewReadOnlyMemoryRegionFromFile in tensorflow::Env.
+END
+ }
+ summary: "Returns immutable tensor from memory region."
+ description: <<END
+The current implementation memmaps the tensor from a file.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_InTopK.pbtxt b/tensorflow/core/api_def/base_api/api_def_InTopK.pbtxt
new file mode 100644
index 0000000000..e11d6e59c7
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_InTopK.pbtxt
@@ -0,0 +1,44 @@
+op {
+ graph_op_name: "InTopK"
+ in_arg {
+ name: "predictions"
+ description: <<END
+A `batch_size` x `classes` tensor.
+END
+ }
+ in_arg {
+ name: "targets"
+ description: <<END
+A `batch_size` vector of class ids.
+END
+ }
+ out_arg {
+ name: "precision"
+ description: <<END
+Computed precision at `k` as a `bool Tensor`.
+END
+ }
+ attr {
+ name: "k"
+ description: <<END
+Number of top elements to look at for computing precision.
+END
+ }
+ summary: "Says whether the targets are in the top `K` predictions."
+ description: <<END
+This outputs a `batch_size` bool array; an entry `out[i]` is `true` if the
+prediction for the target class is among the top `k` predictions among
+all predictions for example `i`. Note that the behavior of `InTopK` differs
+from the `TopK` op in its handling of ties; if multiple classes have the
+same prediction value and straddle the top-`k` boundary, all of those
+classes are considered to be in the top `k`.
+
+More formally, let
+
+ \\(predictions_i\\) be the predictions for all classes for example `i`,
+ \\(targets_i\\) be the target class for example `i`,
+ \\(out_i\\) be the output for example `i`,
+
+$$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
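+
+A hedged example of the tie behavior (hypothetical values):
+
+```python
+# Classes 1 and 2 tie at the top, so both straddle the k=1 boundary.
+predictions = [[0.1, 0.8, 0.8, 0.3]]
+targets = [2]
+tf.nn.in_top_k(predictions, targets, k=1)  # ==> [True]
+```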
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_InTopKV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_InTopKV2.pbtxt
new file mode 100644
index 0000000000..6f418ce0ec
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_InTopKV2.pbtxt
@@ -0,0 +1,44 @@
+op {
+ graph_op_name: "InTopKV2"
+ in_arg {
+ name: "predictions"
+ description: <<END
+A `batch_size` x `classes` tensor.
+END
+ }
+ in_arg {
+ name: "targets"
+ description: <<END
+A `batch_size` vector of class ids.
+END
+ }
+ in_arg {
+ name: "k"
+ description: <<END
+Number of top elements to look at for computing precision.
+END
+ }
+ out_arg {
+ name: "precision"
+ description: <<END
+Computed precision at `k` as a `bool Tensor`.
+END
+ }
+ summary: "Says whether the targets are in the top `K` predictions."
+ description: <<END
+This outputs a `batch_size` bool array; an entry `out[i]` is `true` if the
+prediction for the target class is among the top `k` predictions among
+all predictions for example `i`. Note that the behavior of `InTopK` differs
+from the `TopK` op in its handling of ties; if multiple classes have the
+same prediction value and straddle the top-`k` boundary, all of those
+classes are considered to be in the top `k`.
+
+More formally, let
+
+ \\(predictions_i\\) be the predictions for all classes for example `i`,
+ \\(targets_i\\) be the target class for example `i`,
+ \\(out_i\\) be the output for example `i`,
+
+$$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_InitializeTable.pbtxt b/tensorflow/core/api_def/base_api/api_def_InitializeTable.pbtxt
new file mode 100644
index 0000000000..0f9a01a616
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_InitializeTable.pbtxt
@@ -0,0 +1,23 @@
+op {
+ graph_op_name: "InitializeTable"
+ visibility: SKIP
+ in_arg {
+ name: "table_handle"
+ description: <<END
+Handle to a table which will be initialized.
+END
+ }
+ in_arg {
+ name: "keys"
+ description: <<END
+Keys of type Tkey.
+END
+ }
+ in_arg {
+ name: "values"
+ description: <<END
+Values of type Tval.
+END
+ }
+ summary: "Table initializer that takes two tensors for keys and values respectively."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_InitializeTableFromTextFile.pbtxt b/tensorflow/core/api_def/base_api/api_def_InitializeTableFromTextFile.pbtxt
new file mode 100644
index 0000000000..c1b2888cd4
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_InitializeTableFromTextFile.pbtxt
@@ -0,0 +1,54 @@
+op {
+ graph_op_name: "InitializeTableFromTextFile"
+ visibility: SKIP
+ in_arg {
+ name: "table_handle"
+ description: <<END
+Handle to a table which will be initialized.
+END
+ }
+ in_arg {
+ name: "filename"
+ description: <<END
+Filename of a vocabulary text file.
+END
+ }
+ attr {
+ name: "key_index"
+ description: <<END
+Column index in a line to get the table `key` values from.
+END
+ }
+ attr {
+ name: "value_index"
+ description: <<END
+Column index in a line to get the table `value` values from.
+END
+ }
+ attr {
+ name: "vocab_size"
+ description: <<END
+Number of elements of the file, use -1 if unknown.
+END
+ }
+ attr {
+ name: "delimiter"
+ description: <<END
+Delimiter to separate fields in a line.
+END
+ }
+ summary: "Initializes a table from a text file."
+ description: <<END
+It inserts one key-value pair into the table for each line of the file.
+The key and value are extracted from the whole line content, from elements of
+the split line (based on `delimiter`), or from the line number (starting from
+zero). Where to extract the key and value from a line is specified by
+`key_index` and `value_index`.
+
+- A value of -1 means use the line number (starting from zero); expects `int64`.
+- A value of -2 means use the whole line content; expects `string`.
+- A value >= 0 means use the index (starting at zero) of the split line based
+ on `delimiter`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_InitializeTableFromTextFileV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_InitializeTableFromTextFileV2.pbtxt
new file mode 100644
index 0000000000..d2735af4f2
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_InitializeTableFromTextFileV2.pbtxt
@@ -0,0 +1,56 @@
+op {
+ graph_op_name: "InitializeTableFromTextFileV2"
+ endpoint {
+ name: "InitializeTableFromTextFile"
+ }
+ in_arg {
+ name: "table_handle"
+ description: <<END
+Handle to a table which will be initialized.
+END
+ }
+ in_arg {
+ name: "filename"
+ description: <<END
+Filename of a vocabulary text file.
+END
+ }
+ attr {
+ name: "key_index"
+ description: <<END
+Column index in a line to get the table `key` values from.
+END
+ }
+ attr {
+ name: "value_index"
+ description: <<END
+Column index in a line to get the table `value` values from.
+END
+ }
+ attr {
+ name: "vocab_size"
+ description: <<END
+Number of elements in the file; use -1 if unknown.
+END
+ }
+ attr {
+ name: "delimiter"
+ description: <<END
+Delimiter to separate fields in a line.
+END
+ }
+ summary: "Initializes a table from a text file."
+ description: <<END
+It inserts one key-value pair into the table for each line of the file.
+The key and value are extracted from the whole line content, from elements of
+the line split on `delimiter`, or from the line number (starting from zero).
+Where to extract the key and value from a line is specified by `key_index` and
+`value_index`.
+
+- A value of -1 means use the line number (starting from zero); expects `int64`.
+- A value of -2 means use the whole line content; expects `string`.
+- A value >= 0 means use the index (starting at zero) of the split line based
+ on `delimiter`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_InitializeTableV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_InitializeTableV2.pbtxt
new file mode 100644
index 0000000000..a32a816da8
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_InitializeTableV2.pbtxt
@@ -0,0 +1,25 @@
+op {
+ graph_op_name: "InitializeTableV2"
+ endpoint {
+ name: "InitializeTable"
+ }
+ in_arg {
+ name: "table_handle"
+ description: <<END
+Handle to a table which will be initialized.
+END
+ }
+ in_arg {
+ name: "keys"
+ description: <<END
+Keys of type Tkey.
+END
+ }
+ in_arg {
+ name: "values"
+ description: <<END
+Values of type Tval.
+END
+ }
+ summary: "Table initializer that takes two tensors for keys and values respectively."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_InterleaveDataset.pbtxt b/tensorflow/core/api_def/base_api/api_def_InterleaveDataset.pbtxt
new file mode 100644
index 0000000000..bec2828e24
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_InterleaveDataset.pbtxt
@@ -0,0 +1,19 @@
+op {
+ graph_op_name: "InterleaveDataset"
+ attr {
+ name: "f"
+ description: <<END
+A function mapping elements of `input_dataset`, concatenated with
+`other_arguments`, to a Dataset variant that contains elements matching
+`output_types` and `output_shapes`.
+END
+ }
+ summary: "Creates a dataset that applies `f` to the outputs of `input_dataset`."
+ description: <<END
+Unlike MapDataset, the `f` in InterleaveDataset is expected to return
+a Dataset variant, and InterleaveDataset will flatten successive
+results into a single Dataset. Unlike FlatMapDataset,
+InterleaveDataset will interleave sequences of up to `block_length`
+consecutive elements from `cycle_length` input elements.
+END
+}
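As a rough illustration of the `cycle_length` / `block_length` pattern
described above, a pure-Python generator sketch (not the actual kernel; the
names are hypothetical):

```
def interleave(input_elements, f, cycle_length, block_length):
    iterators = []  # currently open sub-iterators, at most cycle_length
    inputs = iter(input_elements)
    while True:
        # Open new sub-iterators produced by `f` until the cycle is full.
        while len(iterators) < cycle_length:
            try:
                iterators.append(iter(f(next(inputs))))
            except StopIteration:
                break
        if not iterators:
            return
        it = iterators.pop(0)
        exhausted = False
        # Take up to block_length consecutive elements from this iterator.
        for _ in range(block_length):
            try:
                yield next(it)
            except StopIteration:
                exhausted = True
                break
        if not exhausted:
            iterators.append(it)  # rotate it back into the cycle

# list(interleave([1, 2, 3], lambda x: [x] * 4,
#                 cycle_length=2, block_length=2))
# -> [1, 1, 2, 2, 1, 1, 2, 2, 3, 3, 3, 3]
```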
diff --git a/tensorflow/core/api_def/base_api/api_def_Inv.pbtxt b/tensorflow/core/api_def/base_api/api_def_Inv.pbtxt
new file mode 100644
index 0000000000..fc63276e34
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Inv.pbtxt
@@ -0,0 +1,7 @@
+op {
+ graph_op_name: "Inv"
+ summary: "Computes the reciprocal of x element-wise."
+ description: <<END
+I.e., \\(y = 1 / x\\).
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_InvGrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_InvGrad.pbtxt
new file mode 100644
index 0000000000..de2f510eb9
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_InvGrad.pbtxt
@@ -0,0 +1,9 @@
+op {
+ graph_op_name: "InvGrad"
+ visibility: HIDDEN
+ summary: "Computes the gradient for the inverse of `x` wrt its input."
+ description: <<END
+Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy`
+is the corresponding input gradient.
+END
+}
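The formula follows from \\(y = 1/x\\), whose derivative is
\\(-1/x^2 = -y^2\\), so the backpropagated gradient is `dy * (-y * y)`. A
quick numerical sanity check, purely illustrative:

```
# Compare the analytic gradient with a central finite difference.
x, dy = 3.0, 0.5
y = 1.0 / x
analytic = -dy * y * y

eps = 1e-6
numeric = dy * ((1.0 / (x + eps) - 1.0 / (x - eps)) / (2 * eps))
assert abs(analytic - numeric) < 1e-9
```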
diff --git a/tensorflow/core/api_def/base_api/api_def_Invert.pbtxt b/tensorflow/core/api_def/base_api/api_def_Invert.pbtxt
new file mode 100644
index 0000000000..4847a500a8
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Invert.pbtxt
@@ -0,0 +1,8 @@
+op {
+ graph_op_name: "Invert"
+ summary: "Flips all bits elementwise."
+ description: <<END
+The result will have exactly those bits set that are not set in `x`. The
+computation is performed on the underlying representation of `x`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_InvertPermutation.pbtxt b/tensorflow/core/api_def/base_api/api_def_InvertPermutation.pbtxt
new file mode 100644
index 0000000000..66062d818e
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_InvertPermutation.pbtxt
@@ -0,0 +1,33 @@
+op {
+ graph_op_name: "InvertPermutation"
+ in_arg {
+ name: "x"
+ description: <<END
+1-D.
+END
+ }
+ out_arg {
+ name: "y"
+ description: <<END
+1-D.
+END
+ }
+ summary: "Computes the inverse permutation of a tensor."
+ description: <<END
+This operation computes the inverse of an index permutation. It takes a 1-D
+integer tensor `x`, which represents the indices of a zero-based array, and
+swaps each value with its index position. In other words, for an output tensor
+`y` and an input tensor `x`, this operation computes the following:
+
+`y[x[i]] = i for i in [0, 1, ..., len(x) - 1]`
+
+The values must include 0. There can be no duplicate values or negative values.
+
+For example:
+
+```
+# tensor `x` is [3, 4, 0, 2, 1]
+invert_permutation(x) ==> [2, 4, 3, 0, 1]
+```
+END
+}
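The defining relation `y[x[i]] = i` is a one-line scatter in NumPy; a small
sketch using the example above:

```
import numpy as np

x = np.array([3, 4, 0, 2, 1])
y = np.empty_like(x)
y[x] = np.arange(len(x))   # scatter: position x[i] receives value i
print(y)                   # -> [2 4 3 0 1]
```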
diff --git a/tensorflow/core/api_def/base_api/api_def_IsFinite.pbtxt b/tensorflow/core/api_def/base_api/api_def_IsFinite.pbtxt
new file mode 100644
index 0000000000..bccc0e32c1
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_IsFinite.pbtxt
@@ -0,0 +1,9 @@
+op {
+ graph_op_name: "IsFinite"
+ summary: "Returns which elements of x are finite."
+ description: <<END
+@compatibility(numpy)
+Equivalent to np.isfinite
+@end_compatibility
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_IsInf.pbtxt b/tensorflow/core/api_def/base_api/api_def_IsInf.pbtxt
new file mode 100644
index 0000000000..5c390f32d3
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_IsInf.pbtxt
@@ -0,0 +1,9 @@
+op {
+ graph_op_name: "IsInf"
+ summary: "Returns which elements of x are Inf."
+ description: <<END
+@compatibility(numpy)
+Equivalent to np.isinf
+@end_compatibility
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_IsNan.pbtxt b/tensorflow/core/api_def/base_api/api_def_IsNan.pbtxt
new file mode 100644
index 0000000000..1487fad927
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_IsNan.pbtxt
@@ -0,0 +1,9 @@
+op {
+ graph_op_name: "IsNan"
+ summary: "Returns which elements of x are NaN."
+ description: <<END
+@compatibility(numpy)
+Equivalent to np.isnan
+@end_compatibility
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_IsVariableInitialized.pbtxt b/tensorflow/core/api_def/base_api/api_def_IsVariableInitialized.pbtxt
new file mode 100644
index 0000000000..d631da711d
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_IsVariableInitialized.pbtxt
@@ -0,0 +1,19 @@
+op {
+ graph_op_name: "IsVariableInitialized"
+ in_arg {
+ name: "ref"
+ description: <<END
+Should be from a `Variable` node. May be uninitialized.
+END
+ }
+ attr {
+ name: "dtype"
+ description: <<END
+The type of elements in the variable tensor.
+END
+ }
+ summary: "Checks whether a tensor has been initialized."
+ description: <<END
+Outputs a boolean scalar indicating whether the tensor has been initialized.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Iterator.pbtxt b/tensorflow/core/api_def/base_api/api_def_Iterator.pbtxt
new file mode 100644
index 0000000000..660267c221
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Iterator.pbtxt
@@ -0,0 +1,11 @@
+op {
+ graph_op_name: "Iterator"
+ out_arg {
+ name: "handle"
+ description: <<END
+A handle to the iterator that can be passed to a "MakeIterator"
+or "IteratorGetNext" op.
+END
+ }
+ summary: "A container for an iterator resource."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_IteratorFromStringHandle.pbtxt b/tensorflow/core/api_def/base_api/api_def_IteratorFromStringHandle.pbtxt
new file mode 100644
index 0000000000..cd7e382edb
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_IteratorFromStringHandle.pbtxt
@@ -0,0 +1,30 @@
+op {
+ graph_op_name: "IteratorFromStringHandle"
+ in_arg {
+ name: "string_handle"
+ description: <<END
+A string representation of the given handle.
+END
+ }
+ out_arg {
+ name: "resource_handle"
+ description: <<END
+A handle to an iterator resource.
+END
+ }
+ attr {
+ name: "output_types"
+ description: <<END
+If specified, defines the type of each tuple component in an
+element produced by the resulting iterator.
+END
+ }
+ attr {
+ name: "output_shapes"
+ description: <<END
+If specified, defines the shape of each tuple component in an
+element produced by the resulting iterator.
+END
+ }
+ summary: "Converts the given string representing a handle to an iterator to a resource."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_IteratorGetNext.pbtxt b/tensorflow/core/api_def/base_api/api_def_IteratorGetNext.pbtxt
new file mode 100644
index 0000000000..ea5669693e
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_IteratorGetNext.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "IteratorGetNext"
+ summary: "Gets the next output from the given iterator."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_IteratorToStringHandle.pbtxt b/tensorflow/core/api_def/base_api/api_def_IteratorToStringHandle.pbtxt
new file mode 100644
index 0000000000..cf446b4127
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_IteratorToStringHandle.pbtxt
@@ -0,0 +1,16 @@
+op {
+ graph_op_name: "IteratorToStringHandle"
+ in_arg {
+ name: "resource_handle"
+ description: <<END
+A handle to an iterator resource.
+END
+ }
+ out_arg {
+ name: "string_handle"
+ description: <<END
+A string representation of the given handle.
+END
+ }
+ summary: "Converts the given `resource_handle` representing an iterator to a string."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_L.pbtxt b/tensorflow/core/api_def/base_api/api_def_L.pbtxt
deleted file mode 100644
index 09e55eacc7..0000000000
--- a/tensorflow/core/api_def/base_api/api_def_L.pbtxt
+++ /dev/null
@@ -1,392 +0,0 @@
-op {
- graph_op_name: "L2Loss"
- endpoint {
- name: "L2Loss"
- }
- summary: "L2 Loss."
- description: <<END
-Computes half the L2 norm of a tensor without the `sqrt`:
-
- output = sum(t ** 2) / 2
-END
-}
-op {
- graph_op_name: "LMDBReader"
- endpoint {
- name: "LMDBReader"
- }
- summary: "A Reader that outputs the records from a LMDB file."
-}
-op {
- graph_op_name: "LRN"
- endpoint {
- name: "LRN"
- }
- summary: "Local Response Normalization."
- description: <<END
-The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last
-dimension), and each vector is normalized independently. Within a given vector,
-each component is divided by the weighted, squared sum of inputs within
-`depth_radius`. In detail,
-
- sqr_sum[a, b, c, d] =
- sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)
- output = input / (bias + alpha * sqr_sum) ** beta
-
-For details, see [Krizhevsky et al., ImageNet classification with deep
-convolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks).
-END
-}
-op {
- graph_op_name: "LRNGrad"
- endpoint {
- name: "LRNGrad"
- }
- summary: "Gradients for Local Response Normalization."
-}
-op {
- graph_op_name: "LearnedUnigramCandidateSampler"
- endpoint {
- name: "LearnedUnigramCandidateSampler"
- }
- summary: "Generates labels for candidate sampling with a learned unigram distribution."
- description: <<END
-See explanations of candidate sampling and the data formats at
-go/candidate-sampling.
-
-For each batch, this op picks a single set of sampled candidate labels.
-
-The advantages of sampling candidates per-batch are simplicity and the
-possibility of efficient dense matrix multiplication. The disadvantage is that
-the sampled candidates must be chosen independently of the context and of the
-true labels.
-END
-}
-op {
- graph_op_name: "Less"
- endpoint {
- name: "Less"
- }
- summary: "Returns the truth value of (x < y) element-wise."
- description: <<END
-*NOTE*: `Less` supports broadcasting. More about broadcasting
-[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-END
-}
-op {
- graph_op_name: "LessEqual"
- endpoint {
- name: "LessEqual"
- }
- summary: "Returns the truth value of (x <= y) element-wise."
- description: <<END
-*NOTE*: `LessEqual` supports broadcasting. More about broadcasting
-[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-END
-}
-op {
- graph_op_name: "Lgamma"
- endpoint {
- name: "Lgamma"
- }
- summary: "Computes the log of the absolute value of `Gamma(x)` element-wise."
-}
-op {
- graph_op_name: "LinSpace"
- endpoint {
- name: "LinSpace"
- }
- summary: "Generates values in an interval."
- description: <<END
-A sequence of `num` evenly-spaced values are generated beginning at `start`.
-If `num > 1`, the values in the sequence increase by `stop - start / num - 1`,
-so that the last one is exactly `stop`.
-
-For example:
-
-```
-tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0 11.0 12.0]
-```
-END
-}
-op {
- graph_op_name: "ListDiff"
- endpoint {
- name: "ListDiff"
- }
- summary: "Computes the difference between two lists of numbers or strings."
- description: <<END
-Given a list `x` and a list `y`, this operation returns a list `out` that
-represents all values that are in `x` but not in `y`. The returned list `out`
-is sorted in the same order that the numbers appear in `x` (duplicates are
-preserved). This operation also returns a list `idx` that represents the
-position of each `out` element in `x`. In other words:
-
-`out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]`
-
-For example, given this input:
-
-```
-x = [1, 2, 3, 4, 5, 6]
-y = [1, 3, 5]
-```
-
-This operation would return:
-
-```
-out ==> [2, 4, 6]
-idx ==> [1, 3, 5]
-```
-END
-}
-op {
- graph_op_name: "LoadAndRemapMatrix"
- endpoint {
- name: "LoadAndRemapMatrix"
- }
- summary: "Loads a 2-D (matrix) `Tensor` with name `old_tensor_name` from the checkpoint"
- description: <<END
-at `ckpt_path` and potentially reorders its rows and columns using the
-specified remappings.
-
-Most users should use one of the wrapper initializers (such as
-`tf.contrib.framework.load_and_remap_matrix_initializer`) instead of this
-function directly.
-
-The remappings are 1-D tensors with the following properties:
-
-* `row_remapping` must have exactly `num_rows` entries. Row `i` of the output
- matrix will be initialized from the row corresponding to index
- `row_remapping[i]` in the old `Tensor` from the checkpoint.
-* `col_remapping` must have either 0 entries (indicating that no column
- reordering is needed) or `num_cols` entries. If specified, column `j` of the
- output matrix will be initialized from the column corresponding to index
- `col_remapping[j]` in the old `Tensor` from the checkpoint.
-* A value of -1 in either of the remappings signifies a "missing" entry. In that
- case, values from the `initializing_values` tensor will be used to fill that
- missing row or column. If `row_remapping` has `r` missing entries and
- `col_remapping` has `c` missing entries, then the following condition must be
- true:
-
-`(r * num_cols) + (c * num_rows) - (r * c) == len(initializing_values)`
-
-The remapping tensors can be generated using the GenerateVocabRemapping op.
-
-As an example, with row_remapping = [1, 0, -1], col_remapping = [0, 2, -1],
-initializing_values = [0.5, -0.5, 0.25, -0.25, 42], and w(i, j) representing
-the value from row i, column j of the old tensor in the checkpoint, the output
-matrix will look like the following:
-
-[[w(1, 0), w(1, 2), 0.5],
- [w(0, 0), w(0, 2), -0.5],
- [0.25, -0.25, 42]]
-END
-}
-op {
- graph_op_name: "Log"
- endpoint {
- name: "Log"
- }
- summary: "Computes natural logarithm of x element-wise."
- description: <<END
-I.e., \\(y = \log_e x\\).
-END
-}
-op {
- graph_op_name: "Log1p"
- endpoint {
- name: "Log1p"
- }
- summary: "Computes natural logarithm of (1 + x) element-wise."
- description: <<END
-I.e., \\(y = \log_e (1 + x)\\).
-END
-}
-op {
- graph_op_name: "LogMatrixDeterminant"
- endpoint {
- name: "LogMatrixDeterminant"
- }
- summary: "Computes the sign and the log of the absolute value of the determinant of"
- description: <<END
-one or more square matrices.
-
-The input is a tensor of shape `[N, M, M]` whose inner-most 2 dimensions
-form square matrices. The outputs are two tensors containing the signs and
-absolute values of the log determinants for all N input submatrices
-`[..., :, :]` such that the determinant = sign*exp(log_abs_determinant).
-The log_abs_determinant is computed as det(P)*sum(log(diag(LU))) where LU
-is the LU decomposition of the input and P is the corresponding
-permutation matrix.
-END
-}
-op {
- graph_op_name: "LogSoftmax"
- endpoint {
- name: "LogSoftmax"
- }
- summary: "Computes log softmax activations."
- description: <<END
-For each batch `i` and class `j` we have
-
- logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i])))
-END
-}
-op {
- graph_op_name: "LogUniformCandidateSampler"
- endpoint {
- name: "LogUniformCandidateSampler"
- }
- summary: "Generates labels for candidate sampling with a log-uniform distribution."
- description: <<END
-See explanations of candidate sampling and the data formats at
-go/candidate-sampling.
-
-For each batch, this op picks a single set of sampled candidate labels.
-
-The advantages of sampling candidates per-batch are simplicity and the
-possibility of efficient dense matrix multiplication. The disadvantage is that
-the sampled candidates must be chosen independently of the context and of the
-true labels.
-END
-}
-op {
- graph_op_name: "LogicalAnd"
- endpoint {
- name: "LogicalAnd"
- }
- summary: "Returns the truth value of x AND y element-wise."
- description: <<END
-*NOTE*: `LogicalAnd` supports broadcasting. More about broadcasting
-[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-END
-}
-op {
- graph_op_name: "LogicalNot"
- endpoint {
- name: "LogicalNot"
- }
- summary: "Returns the truth value of NOT x element-wise."
-}
-op {
- graph_op_name: "LogicalOr"
- endpoint {
- name: "LogicalOr"
- }
- summary: "Returns the truth value of x OR y element-wise."
- description: <<END
-*NOTE*: `LogicalOr` supports broadcasting. More about broadcasting
-[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-END
-}
-op {
- graph_op_name: "LookupTableExport"
- endpoint {
- name: "LookupTableExport"
- }
- summary: "Outputs all keys and values in the table."
-}
-op {
- graph_op_name: "LookupTableExportV2"
- endpoint {
- name: "LookupTableExportV2"
- }
- summary: "Outputs all keys and values in the table."
-}
-op {
- graph_op_name: "LookupTableFind"
- endpoint {
- name: "LookupTableFind"
- }
- summary: "Looks up keys in a table, outputs the corresponding values."
- description: <<END
-The tensor `keys` must of the same type as the keys of the table.
-The output `values` is of the type of the table values.
-
-The scalar `default_value` is the value output for keys not present in the
-table. It must also be of the same type as the table values.
-END
-}
-op {
- graph_op_name: "LookupTableFindV2"
- endpoint {
- name: "LookupTableFindV2"
- }
- summary: "Looks up keys in a table, outputs the corresponding values."
- description: <<END
-The tensor `keys` must of the same type as the keys of the table.
-The output `values` is of the type of the table values.
-
-The scalar `default_value` is the value output for keys not present in the
-table. It must also be of the same type as the table values.
-END
-}
-op {
- graph_op_name: "LookupTableImport"
- endpoint {
- name: "LookupTableImport"
- }
- summary: "Replaces the contents of the table with the specified keys and values."
- description: <<END
-The tensor `keys` must be of the same type as the keys of the table.
-The tensor `values` must be of the type of the table values.
-END
-}
-op {
- graph_op_name: "LookupTableImportV2"
- endpoint {
- name: "LookupTableImportV2"
- }
- summary: "Replaces the contents of the table with the specified keys and values."
- description: <<END
-The tensor `keys` must be of the same type as the keys of the table.
-The tensor `values` must be of the type of the table values.
-END
-}
-op {
- graph_op_name: "LookupTableInsert"
- endpoint {
- name: "LookupTableInsert"
- }
- summary: "Updates the table to associates keys with values."
- description: <<END
-The tensor `keys` must be of the same type as the keys of the table.
-The tensor `values` must be of the type of the table values.
-END
-}
-op {
- graph_op_name: "LookupTableInsertV2"
- endpoint {
- name: "LookupTableInsertV2"
- }
- summary: "Updates the table to associates keys with values."
- description: <<END
-The tensor `keys` must be of the same type as the keys of the table.
-The tensor `values` must be of the type of the table values.
-END
-}
-op {
- graph_op_name: "LookupTableSize"
- endpoint {
- name: "LookupTableSize"
- }
- summary: "Computes the number of elements in the given table."
-}
-op {
- graph_op_name: "LookupTableSizeV2"
- endpoint {
- name: "LookupTableSizeV2"
- }
- summary: "Computes the number of elements in the given table."
-}
-op {
- graph_op_name: "LoopCond"
- endpoint {
- name: "LoopCond"
- }
- summary: "Forwards the input to the output."
- description: <<END
-This operator represents the loop termination condition used by the
-"pivot" switches of a loop.
-END
-}
diff --git a/tensorflow/core/api_def/base_api/api_def_L2Loss.pbtxt b/tensorflow/core/api_def/base_api/api_def_L2Loss.pbtxt
new file mode 100644
index 0000000000..eaf4b4ec35
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_L2Loss.pbtxt
@@ -0,0 +1,21 @@
+op {
+ graph_op_name: "L2Loss"
+ in_arg {
+ name: "t"
+ description: <<END
+Typically 2-D, but may have any dimensions.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+0-D.
+END
+ }
+ summary: "L2 Loss."
+ description: <<END
+Computes half the L2 norm of a tensor without the `sqrt`:
+
+ output = sum(t ** 2) / 2
+END
+}
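For reference, the summary above in two lines of NumPy (illustrative, not the
registered kernel):

```
import numpy as np

t = np.array([[1.0, 2.0], [3.0, 4.0]])
output = np.sum(t ** 2) / 2   # half the sum of squares, no sqrt
print(output)                 # -> 15.0
```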
diff --git a/tensorflow/core/api_def/base_api/api_def_LMDBReader.pbtxt b/tensorflow/core/api_def/base_api/api_def_LMDBReader.pbtxt
new file mode 100644
index 0000000000..28d19e8658
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_LMDBReader.pbtxt
@@ -0,0 +1,24 @@
+op {
+ graph_op_name: "LMDBReader"
+ out_arg {
+ name: "reader_handle"
+ description: <<END
+The handle to reference the Reader.
+END
+ }
+ attr {
+ name: "container"
+ description: <<END
+If non-empty, this reader is placed in the given container.
+Otherwise, a default container is used.
+END
+ }
+ attr {
+ name: "shared_name"
+ description: <<END
+If non-empty, this reader is named in the given bucket
+with this shared_name. Otherwise, the node name is used instead.
+END
+ }
+ summary: "A Reader that outputs the records from a LMDB file."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_LRN.pbtxt b/tensorflow/core/api_def/base_api/api_def_LRN.pbtxt
new file mode 100644
index 0000000000..9710882186
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_LRN.pbtxt
@@ -0,0 +1,47 @@
+op {
+ graph_op_name: "LRN"
+ in_arg {
+ name: "input"
+ description: <<END
+4-D.
+END
+ }
+ attr {
+ name: "depth_radius"
+ description: <<END
+0-D. Half-width of the 1-D normalization window.
+END
+ }
+ attr {
+ name: "bias"
+ description: <<END
+An offset (usually positive to avoid dividing by 0).
+END
+ }
+ attr {
+ name: "alpha"
+ description: <<END
+A scale factor, usually positive.
+END
+ }
+ attr {
+ name: "beta"
+ description: <<END
+An exponent.
+END
+ }
+ summary: "Local Response Normalization."
+ description: <<END
+The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last
+dimension), and each vector is normalized independently. Within a given vector,
+each component is divided by the weighted, squared sum of inputs within
+`depth_radius`. In detail,
+
+ sqr_sum[a, b, c, d] =
+ sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)
+ output = input / (bias + alpha * sqr_sum) ** beta
+
+For details, see [Krizhevsky et al., ImageNet classification with deep
+convolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks).
+END
+}
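A direct NumPy transcription of the formula above may help; this is a
readability sketch, not the optimized kernel:

```
import numpy as np

def lrn(x, depth_radius, bias, alpha, beta):
    # x has shape [..., depth]; each position is normalized by the
    # squared sum over a window of 2 * depth_radius + 1 channels,
    # clipped at the tensor edges.
    out = np.empty_like(x)
    depth = x.shape[-1]
    for d in range(depth):
        lo = max(0, d - depth_radius)
        hi = min(depth, d + depth_radius + 1)
        sqr_sum = np.sum(x[..., lo:hi] ** 2, axis=-1)
        out[..., d] = x[..., d] / (bias + alpha * sqr_sum) ** beta
    return out
```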
diff --git a/tensorflow/core/api_def/base_api/api_def_LRNGrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_LRNGrad.pbtxt
new file mode 100644
index 0000000000..6b2b289ba6
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_LRNGrad.pbtxt
@@ -0,0 +1,53 @@
+op {
+ graph_op_name: "LRNGrad"
+ visibility: HIDDEN
+ in_arg {
+ name: "input_grads"
+ description: <<END
+4-D with shape `[batch, height, width, channels]`.
+END
+ }
+ in_arg {
+ name: "input_image"
+ description: <<END
+4-D with shape `[batch, height, width, channels]`.
+END
+ }
+ in_arg {
+ name: "output_image"
+ description: <<END
+4-D with shape `[batch, height, width, channels]`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+The gradients for LRN.
+END
+ }
+ attr {
+ name: "depth_radius"
+ description: <<END
+A depth radius.
+END
+ }
+ attr {
+ name: "bias"
+ description: <<END
+An offset (usually > 0 to avoid dividing by 0).
+END
+ }
+ attr {
+ name: "alpha"
+ description: <<END
+A scale factor, usually positive.
+END
+ }
+ attr {
+ name: "beta"
+ description: <<END
+An exponent.
+END
+ }
+ summary: "Gradients for Local Response Normalization."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_LearnedUnigramCandidateSampler.pbtxt b/tensorflow/core/api_def/base_api/api_def_LearnedUnigramCandidateSampler.pbtxt
new file mode 100644
index 0000000000..7097884fde
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_LearnedUnigramCandidateSampler.pbtxt
@@ -0,0 +1,86 @@
+op {
+ graph_op_name: "LearnedUnigramCandidateSampler"
+ in_arg {
+ name: "true_classes"
+ description: <<END
+A batch_size * num_true matrix, in which each row contains the
+IDs of the num_true target_classes in the corresponding original label.
+END
+ }
+ out_arg {
+ name: "sampled_candidates"
+ description: <<END
+A vector of length num_sampled, in which each element is
+the ID of a sampled candidate.
+END
+ }
+ out_arg {
+ name: "true_expected_count"
+ description: <<END
+A batch_size * num_true matrix, representing
+the number of times each candidate is expected to occur in a batch
+of sampled candidates. If unique=true, then this is a probability.
+END
+ }
+ out_arg {
+ name: "sampled_expected_count"
+ description: <<END
+A vector of length num_sampled, for each sampled
+candidate representing the number of times the candidate is expected
+to occur in a batch of sampled candidates. If unique=true, then this is a
+probability.
+END
+ }
+ attr {
+ name: "num_true"
+ description: <<END
+Number of true labels per context.
+END
+ }
+ attr {
+ name: "num_sampled"
+ description: <<END
+Number of candidates to randomly sample.
+END
+ }
+ attr {
+ name: "unique"
+ description: <<END
+If unique is true, we sample with rejection, so that all sampled
+candidates in a batch are unique. This requires some approximation to
+estimate the post-rejection sampling probabilities.
+END
+ }
+ attr {
+ name: "range_max"
+ description: <<END
+The sampler will sample integers from the interval [0, range_max).
+END
+ }
+ attr {
+ name: "seed"
+ description: <<END
+If either seed or seed2 are set to be non-zero, the random number
+generator is seeded by the given seed. Otherwise, it is seeded by a
+random seed.
+END
+ }
+ attr {
+ name: "seed2"
+ description: <<END
+A second seed to avoid seed collision.
+END
+ }
+ summary: "Generates labels for candidate sampling with a learned unigram distribution."
+ description: <<END
+See explanations of candidate sampling and the data formats at
+go/candidate-sampling.
+
+For each batch, this op picks a single set of sampled candidate labels.
+
+The advantages of sampling candidates per-batch are simplicity and the
+possibility of efficient dense matrix multiplication. The disadvantage is that
+the sampled candidates must be chosen independently of the context and of the
+true labels.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_LeftShift.pbtxt b/tensorflow/core/api_def/base_api/api_def_LeftShift.pbtxt
new file mode 100644
index 0000000000..622a90d0c7
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_LeftShift.pbtxt
@@ -0,0 +1,8 @@
+op {
+ graph_op_name: "LeftShift"
+ summary: "Elementwise computes the bitwise left-shift of `x` and `y`."
+ description: <<END
+If `y` is negative, or greater than or equal to the width of `x` in bits, the
+result is implementation defined.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Less.pbtxt b/tensorflow/core/api_def/base_api/api_def_Less.pbtxt
new file mode 100644
index 0000000000..104d583f42
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Less.pbtxt
@@ -0,0 +1,8 @@
+op {
+ graph_op_name: "Less"
+ summary: "Returns the truth value of (x < y) element-wise."
+ description: <<END
+*NOTE*: `Less` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_LessEqual.pbtxt b/tensorflow/core/api_def/base_api/api_def_LessEqual.pbtxt
new file mode 100644
index 0000000000..637fe2f47e
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_LessEqual.pbtxt
@@ -0,0 +1,8 @@
+op {
+ graph_op_name: "LessEqual"
+ summary: "Returns the truth value of (x <= y) element-wise."
+ description: <<END
+*NOTE*: `LessEqual` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Lgamma.pbtxt b/tensorflow/core/api_def/base_api/api_def_Lgamma.pbtxt
new file mode 100644
index 0000000000..fa93f30f38
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Lgamma.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Lgamma"
+ summary: "Computes the log of the absolute value of `Gamma(x)` element-wise."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_LinSpace.pbtxt b/tensorflow/core/api_def/base_api/api_def_LinSpace.pbtxt
new file mode 100644
index 0000000000..94a4ef574d
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_LinSpace.pbtxt
@@ -0,0 +1,39 @@
+op {
+ graph_op_name: "LinSpace"
+ in_arg {
+ name: "start"
+ description: <<END
+First entry in the range.
+END
+ }
+ in_arg {
+ name: "stop"
+ description: <<END
+Last entry in the range.
+END
+ }
+ in_arg {
+ name: "num"
+ description: <<END
+Number of values to generate.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+1-D. The generated values.
+END
+ }
+ summary: "Generates values in an interval."
+ description: <<END
+A sequence of `num` evenly-spaced values is generated beginning at `start`.
+If `num > 1`, the values in the sequence increase by `(stop - start) / (num - 1)`,
+so that the last one is exactly `stop`.
+
+For example:
+
+```
+tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0 11.0 12.0]
+```
+END
+}
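With the step of `(stop - start) / (num - 1)`, the i-th generated value is
simply `start + i * step`; a quick sketch:

```
start, stop, num = 10.0, 12.0, 3
step = (stop - start) / (num - 1)
values = [start + i * step for i in range(num)]
print(values)  # -> [10.0, 11.0, 12.0]
```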
diff --git a/tensorflow/core/api_def/base_api/api_def_ListDiff.pbtxt b/tensorflow/core/api_def/base_api/api_def_ListDiff.pbtxt
new file mode 100644
index 0000000000..60a91dfaa6
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ListDiff.pbtxt
@@ -0,0 +1,54 @@
+op {
+ graph_op_name: "ListDiff"
+ endpoint {
+ name: "SetDiff1D"
+ }
+ in_arg {
+ name: "x"
+ description: <<END
+1-D. Values to keep.
+END
+ }
+ in_arg {
+ name: "y"
+ description: <<END
+1-D. Values to remove.
+END
+ }
+ out_arg {
+ name: "out"
+ description: <<END
+1-D. Values present in `x` but not in `y`.
+END
+ }
+ out_arg {
+ name: "idx"
+ description: <<END
+1-D. Positions of `x` values preserved in `out`.
+END
+ }
+ summary: "Computes the difference between two lists of numbers or strings."
+ description: <<END
+Given a list `x` and a list `y`, this operation returns a list `out` that
+represents all values that are in `x` but not in `y`. The returned list `out`
+is sorted in the same order that the numbers appear in `x` (duplicates are
+preserved). This operation also returns a list `idx` that represents the
+position of each `out` element in `x`. In other words:
+
+`out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]`
+
+For example, given this input:
+
+```
+x = [1, 2, 3, 4, 5, 6]
+y = [1, 3, 5]
+```
+
+This operation would return:
+
+```
+out ==> [2, 4, 6]
+idx ==> [1, 3, 5]
+```
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_LoadAndRemapMatrix.pbtxt b/tensorflow/core/api_def/base_api/api_def_LoadAndRemapMatrix.pbtxt
new file mode 100644
index 0000000000..e1e7007f07
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_LoadAndRemapMatrix.pbtxt
@@ -0,0 +1,105 @@
+op {
+ graph_op_name: "LoadAndRemapMatrix"
+ in_arg {
+ name: "ckpt_path"
+ description: <<END
+Path to the TensorFlow checkpoint (version 2, `TensorBundle`) from
+which the old matrix `Tensor` will be loaded.
+END
+ }
+ in_arg {
+ name: "old_tensor_name"
+ description: <<END
+Name of the 2-D `Tensor` to load from checkpoint.
+END
+ }
+ in_arg {
+ name: "row_remapping"
+ description: <<END
+An int `Tensor` of row remappings (generally created by
+`generate_vocab_remapping`). Even if no row remapping is needed, this must
+still be an index-valued Tensor (e.g. [0, 1, 2, ...]), or a shifted
+index-valued `Tensor` (e.g. [8, 9, 10, ...], for partitioned `Variables`).
+END
+ }
+ in_arg {
+ name: "col_remapping"
+ description: <<END
+An int `Tensor` of column remappings (generally created by
+`generate_vocab_remapping`). May be a size-0 `Tensor` if only row remapping
+is to be done (e.g. column ordering is the same).
+END
+ }
+ in_arg {
+ name: "initializing_values"
+ description: <<END
+A float `Tensor` containing values to fill in for cells
+in the output matrix that are not loaded from the checkpoint. Length must be
+exactly the same as the number of missing / new cells.
+END
+ }
+ out_arg {
+ name: "output_matrix"
+ description: <<END
+Output matrix containing existing values loaded from the
+checkpoint, and with any missing values filled in from initializing_values.
+END
+ }
+ attr {
+ name: "num_rows"
+ description: <<END
+Number of rows (length of the 1st dimension) in the output matrix.
+END
+ }
+ attr {
+ name: "num_cols"
+ description: <<END
+Number of columns (length of the 2nd dimension) in the output matrix.
+END
+ }
+ attr {
+ name: "max_rows_in_memory"
+ description: <<END
+The maximum number of rows to load from the checkpoint at
+once. If less than or equal to 0, the entire matrix will be loaded into
+memory. Setting this arg trades increased disk reads for lower memory usage.
+END
+ }
+ summary: "Loads a 2-D (matrix) `Tensor` with name `old_tensor_name` from the checkpoint"
+ description: <<END
+at `ckpt_path` and potentially reorders its rows and columns using the
+specified remappings.
+
+Most users should use one of the wrapper initializers (such as
+`tf.contrib.framework.load_and_remap_matrix_initializer`) instead of this
+function directly.
+
+The remappings are 1-D tensors with the following properties:
+
+* `row_remapping` must have exactly `num_rows` entries. Row `i` of the output
+ matrix will be initialized from the row corresponding to index
+ `row_remapping[i]` in the old `Tensor` from the checkpoint.
+* `col_remapping` must have either 0 entries (indicating that no column
+ reordering is needed) or `num_cols` entries. If specified, column `j` of the
+ output matrix will be initialized from the column corresponding to index
+ `col_remapping[j]` in the old `Tensor` from the checkpoint.
+* A value of -1 in either of the remappings signifies a "missing" entry. In that
+ case, values from the `initializing_values` tensor will be used to fill that
+ missing row or column. If `row_remapping` has `r` missing entries and
+ `col_remapping` has `c` missing entries, then the following condition must be
+ true:
+
+`(r * num_cols) + (c * num_rows) - (r * c) == len(initializing_values)`
+
+The remapping tensors can be generated using the GenerateVocabRemapping op.
+
+As an example, with row_remapping = [1, 0, -1], col_remapping = [0, 2, -1],
+initializing_values = [0.5, -0.5, 0.25, -0.25, 42], and w(i, j) representing
+the value from row i, column j of the old tensor in the checkpoint, the output
+matrix will look like the following:
+
+[[w(1, 0), w(1, 2), 0.5],
+ [w(0, 0), w(0, 2), -0.5],
+ [0.25, -0.25, 42]]
+END
+}
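A small NumPy sketch of the remapping rule, reproducing the worked example
above; the row-major consumption order of `initializing_values` is inferred
from that example, and `old` stands in for the checkpoint tensor w(i, j):

```
import numpy as np

def load_and_remap(old, row_remapping, col_remapping, initializing_values):
    num_rows, num_cols = len(row_remapping), len(col_remapping)
    out = np.empty((num_rows, num_cols))
    init = iter(initializing_values)  # consumed in row-major order
    for i, r in enumerate(row_remapping):
        for j, c in enumerate(col_remapping):
            # -1 in either remapping marks a "missing" cell.
            out[i, j] = next(init) if (r == -1 or c == -1) else old[r, c]
    return out

old = np.arange(9.0).reshape(3, 3)   # old[i, j] plays the role of w(i, j)
print(load_and_remap(old, [1, 0, -1], [0, 2, -1],
                     [0.5, -0.5, 0.25, -0.25, 42]))
# [[ 3.    5.    0.5 ]
#  [ 0.    2.   -0.5 ]
#  [ 0.25 -0.25 42.  ]]
```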
diff --git a/tensorflow/core/api_def/base_api/api_def_Log.pbtxt b/tensorflow/core/api_def/base_api/api_def_Log.pbtxt
new file mode 100644
index 0000000000..056f1bc2e2
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Log.pbtxt
@@ -0,0 +1,7 @@
+op {
+ graph_op_name: "Log"
+ summary: "Computes natural logarithm of x element-wise."
+ description: <<END
+I.e., \\(y = \log_e x\\).
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Log1p.pbtxt b/tensorflow/core/api_def/base_api/api_def_Log1p.pbtxt
new file mode 100644
index 0000000000..cc9eb2682e
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Log1p.pbtxt
@@ -0,0 +1,7 @@
+op {
+ graph_op_name: "Log1p"
+ summary: "Computes natural logarithm of (1 + x) element-wise."
+ description: <<END
+I.e., \\(y = \log_e (1 + x)\\).
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_LogMatrixDeterminant.pbtxt b/tensorflow/core/api_def/base_api/api_def_LogMatrixDeterminant.pbtxt
new file mode 100644
index 0000000000..8245f7d300
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_LogMatrixDeterminant.pbtxt
@@ -0,0 +1,34 @@
+op {
+ graph_op_name: "LogMatrixDeterminant"
+ in_arg {
+ name: "input"
+ description: <<END
+Shape is `[N, M, M]`.
+END
+ }
+ out_arg {
+ name: "sign"
+ description: <<END
+The signs of the log determinants of the inputs. Shape is `[N]`.
+END
+ }
+ out_arg {
+ name: "log_abs_determinant"
+ description: <<END
+The logs of the absolute values of the determinants
+of the N input matrices. Shape is `[N]`.
+END
+ }
+ summary: "Computes the sign and the log of the absolute value of the determinant of"
+ description: <<END
+one or more square matrices.
+
+The input is a tensor of shape `[N, M, M]` whose inner-most 2 dimensions
+form square matrices. The outputs are two tensors containing the signs and
+absolute values of the log determinants for all N input submatrices
+`[..., :, :]` such that the determinant = sign*exp(log_abs_determinant).
+The log_abs_determinant is computed as det(P)*sum(log(diag(LU))) where LU
+is the LU decomposition of the input and P is the corresponding
+permutation matrix.
+END
+}
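NumPy's `slogdet` exposes the same sign / log-magnitude decomposition, which
makes the stated identity easy to check:

```
import numpy as np

a = np.array([[1.0, 2.0], [3.0, 4.0]])
sign, log_abs_det = np.linalg.slogdet(a)
# determinant = sign * exp(log_abs_determinant)
assert np.isclose(sign * np.exp(log_abs_det), np.linalg.det(a))  # det = -2
```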
diff --git a/tensorflow/core/api_def/base_api/api_def_LogSoftmax.pbtxt b/tensorflow/core/api_def/base_api/api_def_LogSoftmax.pbtxt
new file mode 100644
index 0000000000..ba02abdd0a
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_LogSoftmax.pbtxt
@@ -0,0 +1,21 @@
+op {
+ graph_op_name: "LogSoftmax"
+ in_arg {
+ name: "logits"
+ description: <<END
+2-D with shape `[batch_size, num_classes]`.
+END
+ }
+ out_arg {
+ name: "logsoftmax"
+ description: <<END
+Same shape as `logits`.
+END
+ }
+ summary: "Computes log softmax activations."
+ description: <<END
+For each batch `i` and class `j` we have
+
+ logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i])))
+END
+}
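The per-row formula above, written directly in NumPy; subtracting the row max
first is the standard stability trick and cancels out of the result:

```
import numpy as np

def log_softmax(logits):
    shifted = logits - logits.max(axis=-1, keepdims=True)
    return shifted - np.log(np.sum(np.exp(shifted), axis=-1, keepdims=True))

x = np.array([[1.0, 2.0, 3.0]])
# Rows of exp(logsoftmax) are probability distributions, i.e. sum to 1.
assert np.allclose(np.exp(log_softmax(x)).sum(axis=-1), 1.0)
```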
diff --git a/tensorflow/core/api_def/base_api/api_def_LogUniformCandidateSampler.pbtxt b/tensorflow/core/api_def/base_api/api_def_LogUniformCandidateSampler.pbtxt
new file mode 100644
index 0000000000..9c6807bcb2
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_LogUniformCandidateSampler.pbtxt
@@ -0,0 +1,86 @@
+op {
+ graph_op_name: "LogUniformCandidateSampler"
+ in_arg {
+ name: "true_classes"
+ description: <<END
+A batch_size * num_true matrix, in which each row contains the
+IDs of the num_true target_classes in the corresponding original label.
+END
+ }
+ out_arg {
+ name: "sampled_candidates"
+ description: <<END
+A vector of length num_sampled, in which each element is
+the ID of a sampled candidate.
+END
+ }
+ out_arg {
+ name: "true_expected_count"
+ description: <<END
+A batch_size * num_true matrix, representing
+the number of times each candidate is expected to occur in a batch
+of sampled candidates. If unique=true, then this is a probability.
+END
+ }
+ out_arg {
+ name: "sampled_expected_count"
+ description: <<END
+A vector of length num_sampled, for each sampled
+candidate representing the number of times the candidate is expected
+to occur in a batch of sampled candidates. If unique=true, then this is a
+probability.
+END
+ }
+ attr {
+ name: "num_true"
+ description: <<END
+Number of true labels per context.
+END
+ }
+ attr {
+ name: "num_sampled"
+ description: <<END
+Number of candidates to randomly sample.
+END
+ }
+ attr {
+ name: "unique"
+ description: <<END
+If unique is true, we sample with rejection, so that all sampled
+candidates in a batch are unique. This requires some approximation to
+estimate the post-rejection sampling probabilities.
+END
+ }
+ attr {
+ name: "range_max"
+ description: <<END
+The sampler will sample integers from the interval [0, range_max).
+END
+ }
+ attr {
+ name: "seed"
+ description: <<END
+If either seed or seed2 are set to be non-zero, the random number
+generator is seeded by the given seed. Otherwise, it is seeded by a
+random seed.
+END
+ }
+ attr {
+ name: "seed2"
+ description: <<END
+A second seed to avoid seed collision.
+END
+ }
+ summary: "Generates labels for candidate sampling with a log-uniform distribution."
+ description: <<END
+See explanations of candidate sampling and the data formats at
+go/candidate-sampling.
+
+For each batch, this op picks a single set of sampled candidate labels.
+
+The advantages of sampling candidates per-batch are simplicity and the
+possibility of efficient dense matrix multiplication. The disadvantage is that
+the sampled candidates must be chosen independently of the context and of the
+true labels.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_LogicalAnd.pbtxt b/tensorflow/core/api_def/base_api/api_def_LogicalAnd.pbtxt
new file mode 100644
index 0000000000..4ec78d02b0
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_LogicalAnd.pbtxt
@@ -0,0 +1,8 @@
+op {
+ graph_op_name: "LogicalAnd"
+ summary: "Returns the truth value of x AND y element-wise."
+ description: <<END
+*NOTE*: `LogicalAnd` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_LogicalNot.pbtxt b/tensorflow/core/api_def/base_api/api_def_LogicalNot.pbtxt
new file mode 100644
index 0000000000..af29e920c9
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_LogicalNot.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "LogicalNot"
+ summary: "Returns the truth value of NOT x element-wise."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_LogicalOr.pbtxt b/tensorflow/core/api_def/base_api/api_def_LogicalOr.pbtxt
new file mode 100644
index 0000000000..b4f31cd521
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_LogicalOr.pbtxt
@@ -0,0 +1,8 @@
+op {
+ graph_op_name: "LogicalOr"
+ summary: "Returns the truth value of x OR y element-wise."
+ description: <<END
+*NOTE*: `LogicalOr` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_LookupTableExport.pbtxt b/tensorflow/core/api_def/base_api/api_def_LookupTableExport.pbtxt
new file mode 100644
index 0000000000..dfc721ddee
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_LookupTableExport.pbtxt
@@ -0,0 +1,23 @@
+op {
+ graph_op_name: "LookupTableExport"
+ visibility: SKIP
+ in_arg {
+ name: "table_handle"
+ description: <<END
+Handle to the table.
+END
+ }
+ out_arg {
+ name: "keys"
+ description: <<END
+Vector of all keys present in the table.
+END
+ }
+ out_arg {
+ name: "values"
+ description: <<END
+Tensor of all values in the table. Indexed in parallel with `keys`.
+END
+ }
+ summary: "Outputs all keys and values in the table."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_LookupTableExportV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_LookupTableExportV2.pbtxt
new file mode 100644
index 0000000000..2bc944c918
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_LookupTableExportV2.pbtxt
@@ -0,0 +1,25 @@
+op {
+ graph_op_name: "LookupTableExportV2"
+ endpoint {
+ name: "LookupTableExport"
+ }
+ in_arg {
+ name: "table_handle"
+ description: <<END
+Handle to the table.
+END
+ }
+ out_arg {
+ name: "keys"
+ description: <<END
+Vector of all keys present in the table.
+END
+ }
+ out_arg {
+ name: "values"
+ description: <<END
+Tensor of all values in the table. Indexed in parallel with `keys`.
+END
+ }
+ summary: "Outputs all keys and values in the table."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_LookupTableFind.pbtxt b/tensorflow/core/api_def/base_api/api_def_LookupTableFind.pbtxt
new file mode 100644
index 0000000000..ce1109e7eb
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_LookupTableFind.pbtxt
@@ -0,0 +1,31 @@
+op {
+ graph_op_name: "LookupTableFind"
+ visibility: SKIP
+ in_arg {
+ name: "table_handle"
+ description: <<END
+Handle to the table.
+END
+ }
+ in_arg {
+ name: "keys"
+ description: <<END
+Any shape. Keys to look up.
+END
+ }
+ out_arg {
+ name: "values"
+ description: <<END
+Same shape as `keys`. Values found in the table, or `default_value`
+for missing keys.
+END
+ }
+ summary: "Looks up keys in a table, outputs the corresponding values."
+ description: <<END
+The tensor `keys` must be of the same type as the keys of the table.
+The output `values` is of the type of the table values.
+
+The scalar `default_value` is the value output for keys not present in the
+table. It must also be of the same type as the table values.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_LookupTableFindV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_LookupTableFindV2.pbtxt
new file mode 100644
index 0000000000..30f69220e8
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_LookupTableFindV2.pbtxt
@@ -0,0 +1,33 @@
+op {
+ graph_op_name: "LookupTableFindV2"
+ endpoint {
+ name: "LookupTableFind"
+ }
+ in_arg {
+ name: "table_handle"
+ description: <<END
+Handle to the table.
+END
+ }
+ in_arg {
+ name: "keys"
+ description: <<END
+Any shape. Keys to look up.
+END
+ }
+ out_arg {
+ name: "values"
+ description: <<END
+Same shape as `keys`. Values found in the table, or `default_value`
+for missing keys.
+END
+ }
+ summary: "Looks up keys in a table, outputs the corresponding values."
+ description: <<END
+The tensor `keys` must be of the same type as the keys of the table.
+The output `values` is of the type of the table values.
+
+The scalar `default_value` is the value output for keys not present in the
+table. It must also be of the same type as the table values.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_LookupTableImport.pbtxt b/tensorflow/core/api_def/base_api/api_def_LookupTableImport.pbtxt
new file mode 100644
index 0000000000..6861c4e97d
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_LookupTableImport.pbtxt
@@ -0,0 +1,27 @@
+op {
+ graph_op_name: "LookupTableImport"
+ visibility: SKIP
+ in_arg {
+ name: "table_handle"
+ description: <<END
+Handle to the table.
+END
+ }
+ in_arg {
+ name: "keys"
+ description: <<END
+Any shape. Keys to look up.
+END
+ }
+ in_arg {
+ name: "values"
+ description: <<END
+Values to associate with keys.
+END
+ }
+ summary: "Replaces the contents of the table with the specified keys and values."
+ description: <<END
+The tensor `keys` must be of the same type as the keys of the table.
+The tensor `values` must be of the type of the table values.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_LookupTableImportV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_LookupTableImportV2.pbtxt
new file mode 100644
index 0000000000..f39fbc4996
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_LookupTableImportV2.pbtxt
@@ -0,0 +1,29 @@
+op {
+ graph_op_name: "LookupTableImportV2"
+ endpoint {
+ name: "LookupTableImport"
+ }
+ in_arg {
+ name: "table_handle"
+ description: <<END
+Handle to the table.
+END
+ }
+ in_arg {
+ name: "keys"
+ description: <<END
+Any shape. Keys to look up.
+END
+ }
+ in_arg {
+ name: "values"
+ description: <<END
+Values to associate with keys.
+END
+ }
+ summary: "Replaces the contents of the table with the specified keys and values."
+ description: <<END
+The tensor `keys` must be of the same type as the keys of the table.
+The tensor `values` must be of the type of the table values.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_LookupTableInsert.pbtxt b/tensorflow/core/api_def/base_api/api_def_LookupTableInsert.pbtxt
new file mode 100644
index 0000000000..f07ac2f3db
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_LookupTableInsert.pbtxt
@@ -0,0 +1,27 @@
+op {
+ graph_op_name: "LookupTableInsert"
+ visibility: SKIP
+ in_arg {
+ name: "table_handle"
+ description: <<END
+Handle to the table.
+END
+ }
+ in_arg {
+ name: "keys"
+ description: <<END
+Any shape. Keys to look up.
+END
+ }
+ in_arg {
+ name: "values"
+ description: <<END
+Values to associate with keys.
+END
+ }
+ summary: "Updates the table to associates keys with values."
+ description: <<END
+The tensor `keys` must be of the same type as the keys of the table.
+The tensor `values` must be of the type of the table values.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_LookupTableInsertV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_LookupTableInsertV2.pbtxt
new file mode 100644
index 0000000000..b93e68a5b0
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_LookupTableInsertV2.pbtxt
@@ -0,0 +1,29 @@
+op {
+ graph_op_name: "LookupTableInsertV2"
+ endpoint {
+ name: "LookupTableInsert"
+ }
+ in_arg {
+ name: "table_handle"
+ description: <<END
+Handle to the table.
+END
+ }
+ in_arg {
+ name: "keys"
+ description: <<END
+Any shape. Keys to look up.
+END
+ }
+ in_arg {
+ name: "values"
+ description: <<END
+Values to associate with keys.
+END
+ }
+ summary: "Updates the table to associates keys with values."
+ description: <<END
+The tensor `keys` must be of the same type as the keys of the table.
+The tensor `values` must be of the type of the table values.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_LookupTableSize.pbtxt b/tensorflow/core/api_def/base_api/api_def_LookupTableSize.pbtxt
new file mode 100644
index 0000000000..d561c45d62
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_LookupTableSize.pbtxt
@@ -0,0 +1,17 @@
+op {
+ graph_op_name: "LookupTableSize"
+ visibility: SKIP
+ in_arg {
+ name: "table_handle"
+ description: <<END
+Handle to the table.
+END
+ }
+ out_arg {
+ name: "size"
+ description: <<END
+Scalar that contains the number of elements in the table.
+END
+ }
+ summary: "Computes the number of elements in the given table."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_LookupTableSizeV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_LookupTableSizeV2.pbtxt
new file mode 100644
index 0000000000..bf5ab25663
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_LookupTableSizeV2.pbtxt
@@ -0,0 +1,19 @@
+op {
+ graph_op_name: "LookupTableSizeV2"
+ endpoint {
+ name: "LookupTableSize"
+ }
+ in_arg {
+ name: "table_handle"
+ description: <<END
+Handle to the table.
+END
+ }
+ out_arg {
+ name: "size"
+ description: <<END
+Scalar that contains the number of elements in the table.
+END
+ }
+ summary: "Computes the number of elements in the given table."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_LoopCond.pbtxt b/tensorflow/core/api_def/base_api/api_def_LoopCond.pbtxt
new file mode 100644
index 0000000000..7b2dbdf4b4
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_LoopCond.pbtxt
@@ -0,0 +1,20 @@
+op {
+ graph_op_name: "LoopCond"
+ in_arg {
+ name: "input"
+ description: <<END
+A boolean scalar, representing the branch predicate of the Switch op.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+The same tensor as `input`.
+END
+ }
+ summary: "Forwards the input to the output."
+ description: <<END
+This operator represents the loop termination condition used by the
+"pivot" switches of a loop.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_M.pbtxt b/tensorflow/core/api_def/base_api/api_def_M.pbtxt
deleted file mode 100644
index 7295928bad..0000000000
--- a/tensorflow/core/api_def/base_api/api_def_M.pbtxt
+++ /dev/null
@@ -1,749 +0,0 @@
-op {
- graph_op_name: "MakeIterator"
- endpoint {
- name: "MakeIterator"
- }
- summary: "Makes a new iterator from the given `dataset` and stores it in `iterator`."
- description: <<END
-This operation may be executed multiple times. Each execution will reset the
-iterator in `iterator` to the first element of `dataset`.
-END
-}
-op {
- graph_op_name: "MapClear"
- endpoint {
- name: "MapClear"
- }
- summary: "Op removes all elements in the underlying container."
-}
-op {
- graph_op_name: "MapDataset"
- endpoint {
- name: "MapDataset"
- }
- summary: "Creates a dataset that applies `f` to the outputs of `input_dataset`."
-}
-op {
- graph_op_name: "MapIncompleteSize"
- endpoint {
- name: "MapIncompleteSize"
- }
- summary: "Op returns the number of incomplete elements in the underlying container."
-}
-op {
- graph_op_name: "MapPeek"
- endpoint {
- name: "MapPeek"
- }
- summary: "Op peeks at the values at the specified key. If the"
- description: <<END
-underlying container does not contain this key
-this op will block until it does.
-END
-}
-op {
- graph_op_name: "MapSize"
- endpoint {
- name: "MapSize"
- }
- summary: "Op returns the number of elements in the underlying container."
-}
-op {
- graph_op_name: "MapStage"
- endpoint {
- name: "MapStage"
- }
- summary: "Stage (key, values) in the underlying container which behaves like a hashtable."
-}
-op {
- graph_op_name: "MapUnstage"
- endpoint {
- name: "MapUnstage"
- }
- summary: "Op removes and returns the values associated with the key"
- description: <<END
-from the underlying container. If the underlying container
-does not contain this key, the op will block until it does.
-END
-}
-op {
- graph_op_name: "MapUnstageNoKey"
- endpoint {
- name: "MapUnstageNoKey"
- }
- summary: "Op removes and returns a random (key, value)"
- description: <<END
-from the underlying container. If the underlying container
-does not contain elements, the op will block until it does.
-END
-}
-op {
- graph_op_name: "MatMul"
- endpoint {
- name: "MatMul"
- }
- summary: "Multiply the matrix \"a\" by the matrix \"b\"."
- description: <<END
-The inputs must be two-dimensional matrices and the inner dimension of
-"a" (after being transposed if transpose_a is true) must match the
-outer dimension of "b" (after being transposed if transposed_b is
-true).
-
-*Note*: The default kernel implementation for MatMul on GPUs uses
-cublas.
-END
-}
-op {
- graph_op_name: "MatchingFiles"
- endpoint {
- name: "MatchingFiles"
- }
- summary: "Returns the set of files matching one or more glob patterns."
- description: <<END
-Note that this routine only supports wildcard characters in the
-basename portion of the pattern, not in the directory portion.
-END
-}
-op {
- graph_op_name: "MatrixBandPart"
- endpoint {
- name: "MatrixBandPart"
- }
- summary: "Copy a tensor setting everything outside a central band in each innermost matrix"
- description: <<END
-to zero.
-
-The `band` part is computed as follows:
-Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a
-tensor with the same shape where
-
-`band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`.
-
-The indicator function
-
-`in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower)) &&
- (num_upper < 0 || (n-m) <= num_upper)`.
-
-For example:
-
-```
-# if 'input' is [[ 0, 1, 2, 3]
- [-1, 0, 1, 2]
- [-2, -1, 0, 1]
- [-3, -2, -1, 0]],
-
-tf.matrix_band_part(input, 1, -1) ==> [[ 0, 1, 2, 3]
- [-1, 0, 1, 2]
- [ 0, -1, 0, 1]
- [ 0, 0, -1, 0]],
-
-tf.matrix_band_part(input, 2, 1) ==> [[ 0, 1, 0, 0]
- [-1, 0, 1, 0]
- [-2, -1, 0, 1]
- [ 0, -2, -1, 0]]
-```
-
-Useful special cases:
-
-```
- tf.matrix_band_part(input, 0, -1) ==> Upper triangular part.
- tf.matrix_band_part(input, -1, 0) ==> Lower triangular part.
- tf.matrix_band_part(input, 0, 0) ==> Diagonal.
-```
-END
-}
-op {
- graph_op_name: "MatrixDeterminant"
- endpoint {
- name: "MatrixDeterminant"
- }
- summary: "Computes the determinant of one or more square matrices."
- description: <<END
-The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
-form square matrices. The output is a tensor containing the determinants
-for all input submatrices `[..., :, :]`.
-END
-}
-op {
- graph_op_name: "MatrixDiag"
- endpoint {
- name: "MatrixDiag"
- }
- summary: "Returns a batched diagonal tensor with a given batched diagonal values."
- description: <<END
-Given a `diagonal`, this operation returns a tensor with the `diagonal` and
-everything else padded with zeros. The diagonal is computed as follows:
-
-Assume `diagonal` has `k` dimensions `[I, J, K, ..., N]`, then the output is a
-tensor of rank `k+1` with dimensions `[I, J, K, ..., N, N]` where:
-
-`output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`.
-
-For example:
-
-```
-# 'diagonal' is [[1, 2, 3, 4], [5, 6, 7, 8]]
-
-and diagonal.shape = (2, 4)
-
-tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0]
- [0, 2, 0, 0]
- [0, 0, 3, 0]
- [0, 0, 0, 4]],
- [[5, 0, 0, 0]
- [0, 6, 0, 0]
- [0, 0, 7, 0]
- [0, 0, 0, 8]]]
-
-which has shape (2, 4, 4)
-```
-END
-}
-op {
- graph_op_name: "MatrixDiagPart"
- endpoint {
- name: "MatrixDiagPart"
- }
- summary: "Returns the batched diagonal part of a batched tensor."
- description: <<END
-This operation returns a tensor with the `diagonal` part
-of the batched `input`. The `diagonal` part is computed as follows:
-
-Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a
-tensor of rank `k - 1` with dimensions `[I, J, K, ..., min(M, N)]` where:
-
-`diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`.
-
-The input must be at least a matrix.
-
-For example:
-
-```
-# 'input' is [[[1, 0, 0, 0]
- [0, 2, 0, 0]
- [0, 0, 3, 0]
- [0, 0, 0, 4]],
- [[5, 0, 0, 0]
- [0, 6, 0, 0]
- [0, 0, 7, 0]
- [0, 0, 0, 8]]]
-
-and input.shape = (2, 4, 4)
-
-tf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]]
-
-which has shape (2, 4)
-```
-END
-}
-op {
- graph_op_name: "MatrixInverse"
- endpoint {
- name: "MatrixInverse"
- }
- summary: "Computes the inverse of one or more square invertible matrices or their"
- description: <<END
-adjoints (conjugate transposes).
-
-The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
-form square matrices. The output is a tensor of the same shape as the input
-containing the inverse for all input submatrices `[..., :, :]`.
-
-The op uses LU decomposition with partial pivoting to compute the inverses.
-
-If a matrix is not invertible there is no guarantee what the op does. It
-may detect the condition and raise an exception or it may simply return a
-garbage result.
-END
-}
-op {
- graph_op_name: "MatrixSetDiag"
- endpoint {
- name: "MatrixSetDiag"
- }
- summary: "Returns a batched matrix tensor with new batched diagonal values."
- description: <<END
-Given `input` and `diagonal`, this operation returns a tensor with the
-same shape and values as `input`, except for the main diagonal of the
-innermost matrices. These will be overwritten by the values in `diagonal`.
-
-The output is computed as follows:
-
-Assume `input` has `k+1` dimensions `[I, J, K, ..., M, N]` and `diagonal` has
-`k` dimensions `[I, J, K, ..., min(M, N)]`. Then the output is a
-tensor of rank `k+1` with dimensions `[I, J, K, ..., M, N]` where:
-
- * `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == n`.
- * `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m != n`.
-END
-}
-op {
- graph_op_name: "MatrixSolve"
- endpoint {
- name: "MatrixSolve"
- }
- summary: "Solves systems of linear equations."
- description: <<END
-`matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
-form square matrices. `rhs` is a tensor of shape `[..., M, K]`. The `output` is
-a tensor of shape `[..., M, K]`. If `adjoint` is `False` then each output matrix
-satisfies `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.
-If `adjoint` is `True` then each output matrix satisfies
-`adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`.
-END
-}
-op {
- graph_op_name: "MatrixSolveLs"
- endpoint {
- name: "MatrixSolveLs"
- }
- summary: "Solves one or more linear least-squares problems."
- description: <<END
-`matrix` is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions
-form real or complex matrices of size `[M, N]`. `rhs` is a tensor of the same
-type as `matrix` and shape `[..., M, K]`.
-The output is a tensor of shape `[..., N, K]` where each output matrix solves
-each of the equations
-`matrix[..., :, :]` * `output[..., :, :]` = `rhs[..., :, :]`
-in the least squares sense.
-
-We use the following notation for (complex) matrix and right-hand sides
-in the batch:
-
-`matrix`=\\(A \in \mathbb{C}^{m \times n}\\),
-`rhs`=\\(B \in \mathbb{C}^{m \times k}\\),
-`output`=\\(X \in \mathbb{C}^{n \times k}\\),
-`l2_regularizer`=\\(\lambda \in \mathbb{R}\\).
-
-If `fast` is `True`, then the solution is computed by solving the normal
-equations using Cholesky decomposition. Specifically, if \\(m \ge n\\) then
-\\(X = (A^H A + \lambda I)^{-1} A^H B\\), which solves the least-squares
-problem \\(X = \mathrm{argmin}_{Z \in \mathbb{C}^{n \times k} } ||A Z - B||_F^2 +
-\lambda ||Z||_F^2\\). If \\(m \lt n\\) then `output` is computed as
-\\(X = A^H (A A^H + \lambda I)^{-1} B\\), which (for \\(\lambda = 0\\)) is the
-minimum-norm solution to the under-determined linear system, i.e.
-\\(X = \mathrm{argmin}_{Z \in \mathbb{C}^{n \times k} } ||Z||_F^2 \\),
-subject to \\(A Z = B\\). Notice that the fast path is only numerically stable
-when \\(A\\) is numerically full rank and has a condition number
-\\(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach} } }\\) or \\(\lambda\\) is
-sufficiently large.
-
-If `fast` is `False` an algorithm based on the numerically robust complete
-orthogonal decomposition is used. This computes the minimum-norm
-least-squares solution, even when \\(A\\) is rank deficient. This path is
-typically 6-7 times slower than the fast path. If `fast` is `False` then
-`l2_regularizer` is ignored.
-END
-}
-op {
- graph_op_name: "MatrixTriangularSolve"
- endpoint {
- name: "MatrixTriangularSolve"
- }
- summary: "Solves systems of linear equations with upper or lower triangular matrices by"
- description: <<END
-backsubstitution.
-
-`matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form
-square matrices. If `lower` is `True` then the strictly upper triangular part
-of each inner-most matrix is assumed to be zero and not accessed.
-If `lower` is False then the strictly lower triangular part of each inner-most
-matrix is assumed to be zero and not accessed.
-`rhs` is a tensor of shape `[..., M, K]`.
-
-The output is a tensor of shape `[..., M, K]`. If `adjoint` is
-`False` then the innermost matrices in `output` satisfy matrix equations
-`matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.
-If `adjoint` is `True` then the innermost matrices in
-`output` satisfy matrix equations
-`adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`.
-END
-}
-op {
- graph_op_name: "Max"
- endpoint {
- name: "Max"
- }
- summary: "Computes the maximum of elements across dimensions of a tensor."
- description: <<END
-Reduces `input` along the dimensions given in `reduction_indices`. Unless
-`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
-`reduction_indices`. If `keep_dims` is true, the reduced dimensions are
-retained with length 1.
-END
-}
-op {
- graph_op_name: "MaxPool"
- endpoint {
- name: "MaxPool"
- }
- summary: "Performs max pooling on the input."
-}
-op {
- graph_op_name: "MaxPool3D"
- endpoint {
- name: "MaxPool3D"
- }
- summary: "Performs 3D max pooling on the input."
-}
-op {
- graph_op_name: "MaxPool3DGrad"
- endpoint {
- name: "MaxPool3DGrad"
- }
- summary: "Computes gradients of max pooling function."
-}
-op {
- graph_op_name: "MaxPool3DGradGrad"
- endpoint {
- name: "MaxPool3DGradGrad"
- }
- summary: "Computes second-order gradients of the maxpooling function."
-}
-op {
- graph_op_name: "MaxPoolGrad"
- endpoint {
- name: "MaxPoolGrad"
- }
- summary: "Computes gradients of the maxpooling function."
-}
-op {
- graph_op_name: "MaxPoolGradGrad"
- endpoint {
- name: "MaxPoolGradGrad"
- }
- summary: "Computes second-order gradients of the maxpooling function."
-}
-op {
- graph_op_name: "MaxPoolGradGradV2"
- endpoint {
- name: "MaxPoolGradGradV2"
- }
- summary: "Computes second-order gradients of the maxpooling function."
-}
-op {
- graph_op_name: "MaxPoolGradGradWithArgmax"
- endpoint {
- name: "MaxPoolGradGradWithArgmax"
- }
- summary: "Computes second-order gradients of the maxpooling function."
-}
-op {
- graph_op_name: "MaxPoolGradV2"
- endpoint {
- name: "MaxPoolGradV2"
- }
- summary: "Computes gradients of the maxpooling function."
-}
-op {
- graph_op_name: "MaxPoolGradWithArgmax"
- endpoint {
- name: "MaxPoolGradWithArgmax"
- }
- summary: "Computes gradients of the maxpooling function."
-}
-op {
- graph_op_name: "MaxPoolV2"
- endpoint {
- name: "MaxPoolV2"
- }
- summary: "Performs max pooling on the input."
-}
-op {
- graph_op_name: "MaxPoolWithArgmax"
- endpoint {
- name: "MaxPoolWithArgmax"
- }
- summary: "Performs max pooling on the input and outputs both max values and indices."
- description: <<END
-The indices in `argmax` are flattened, so that a maximum value at position
-`[b, y, x, c]` becomes flattened index
-`((b * height + y) * width + x) * channels + c`.
-
-The indices returned are always in `[0, height) x [0, width)` before flattening,
-even if padding is involved and the mathematically correct answer is outside
-(either negative or too large). This is a bug, but fixing it is difficult to do
-in a safe, backwards-compatible way, especially due to flattening.
-END
-}
-op {
- graph_op_name: "Maximum"
- endpoint {
- name: "Maximum"
- }
- summary: "Returns the max of x and y (i.e. x > y ? x : y) element-wise."
- description: <<END
-*NOTE*: `Maximum` supports broadcasting. More about broadcasting
-[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-END
-}
-op {
- graph_op_name: "Mean"
- endpoint {
- name: "Mean"
- }
- summary: "Computes the mean of elements across dimensions of a tensor."
- description: <<END
-Reduces `input` along the dimensions given in `reduction_indices`. Unless
-`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
-`reduction_indices`. If `keep_dims` is true, the reduced dimensions are
-retained with length 1.
-END
-}
-op {
- graph_op_name: "Merge"
- endpoint {
- name: "Merge"
- }
- summary: "Forwards the value of an available tensor from `inputs` to `output`."
- description: <<END
-`Merge` waits for at least one of the tensors in `inputs` to become available.
-It is usually combined with `Switch` to implement branching.
-
-`Merge` forwards the first tensor to become available to `output`, and sets
-`value_index` to its index in `inputs`.
-END
-}
-op {
- graph_op_name: "MergeSummary"
- endpoint {
- name: "MergeSummary"
- }
- summary: "Merges summaries."
- description: <<END
-This op creates a
-[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
-protocol buffer that contains the union of all the values in the input
-summaries.
-
-When the Op is run, it reports an `InvalidArgument` error if multiple values
-in the summaries to merge use the same tag.
-END
-}
-op {
- graph_op_name: "MergeV2Checkpoints"
- endpoint {
- name: "MergeV2Checkpoints"
- }
- summary: "V2 format specific: merges the metadata files of sharded checkpoints. The"
- description: <<END
-result is one logical checkpoint, with one physical metadata file and renamed
-data files.
-
-Intended for "grouping" multiple checkpoints in a sharded checkpoint setup.
-
-If delete_old_dirs is true, attempts to recursively delete the dirname of each
-path in the input checkpoint_prefixes. This is useful when those paths are
-non-user-facing temporary locations.
-END
-}
-op {
- graph_op_name: "Mfcc"
- endpoint {
- name: "Mfcc"
- }
- summary: "Transforms a spectrogram into a form that\'s useful for speech recognition."
- description: <<END
-Mel Frequency Cepstral Coefficients are a way of representing audio data that's
-been effective as an input feature for machine learning. They are created by
-taking the spectrum of a spectrogram (a 'cepstrum'), and discarding some of the
-higher frequencies that are less significant to the human ear. They have a long
-history in the speech recognition world, and https://en.wikipedia.org/wiki/Mel-frequency_cepstrum
-is a good resource to learn more.
-END
-}
-op {
- graph_op_name: "Min"
- endpoint {
- name: "Min"
- }
- summary: "Computes the minimum of elements across dimensions of a tensor."
- description: <<END
-Reduces `input` along the dimensions given in `reduction_indices`. Unless
-`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
-`reduction_indices`. If `keep_dims` is true, the reduced dimensions are
-retained with length 1.
-END
-}
-op {
- graph_op_name: "Minimum"
- endpoint {
- name: "Minimum"
- }
- summary: "Returns the min of x and y (i.e. x < y ? x : y) element-wise."
- description: <<END
-*NOTE*: `Minimum` supports broadcasting. More about broadcasting
-[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-END
-}
-op {
- graph_op_name: "MirrorPad"
- endpoint {
- name: "MirrorPad"
- }
- summary: "Pads a tensor with mirrored values."
- description: <<END
-This operation pads `input` with mirrored values according to the `paddings`
-you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is
-the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
-how many values to add before the contents of `input` in that dimension, and
-`paddings[D, 1]` indicates how many values to add after the contents of `input`
-in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no
-greater than `input.dim_size(D)` if `copy_border` is true, or no greater than
-`input.dim_size(D) - 1` if `copy_border` is false.
-
-The padded size of each dimension D of the output is:
-
-`paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
-
-For example:
-
-```
-# 't' is [[1, 2, 3], [4, 5, 6]].
-# 'paddings' is [[1, 1], [2, 2]].
-# 'mode' is SYMMETRIC.
-# rank of 't' is 2.
-pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2]
- [2, 1, 1, 2, 3, 3, 2]
- [5, 4, 4, 5, 6, 6, 5]
- [5, 4, 4, 5, 6, 6, 5]]
-```
-END
-}
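As a sanity check, the padded-size formula above can be reproduced with `np.pad` in `symmetric` mode, which mirrors the SYMMETRIC behavior shown in the example (a NumPy sketch with made-up values, not TensorFlow code):

```
import numpy as np

t = np.array([[1, 2, 3], [4, 5, 6]])
paddings = [(1, 1), (2, 2)]

out = np.pad(t, paddings, mode='symmetric')
# Each dimension D grows to paddings[D][0] + t.shape[D] + paddings[D][1].
assert out.shape == (1 + 2 + 1, 2 + 3 + 2)  # (4, 7)
```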
-op {
- graph_op_name: "MirrorPadGrad"
- endpoint {
- name: "MirrorPadGrad"
- }
- summary: "Gradient op for `MirrorPad` op. This op folds a mirror-padded tensor."
- description: <<END
-This operation folds the padded areas of `input` by `MirrorPad` according to the
-`paddings` you specify. `paddings` must be the same as `paddings` argument
-given to the corresponding `MirrorPad` op.
-
-The folded size of each dimension D of the output is:
-
-`input.dim_size(D) - paddings(D, 0) - paddings(D, 1)`
-
-For example:
-
-```
-# 't' is [[1, 2, 3], [4, 5, 6], [7, 8, 9]].
-# 'paddings' is [[0, 1], [0, 1]].
-# 'mode' is SYMMETRIC.
-# rank of 't' is 2.
-pad(t, paddings) ==> [[ 1, 5]
- [11, 28]]
-```
-END
-}
-op {
- graph_op_name: "Mod"
- endpoint {
- name: "Mod"
- }
- summary: "Returns element-wise remainder of division. This emulates C semantics in that"
- description: <<END
-the result here is consistent with a truncating divide. E.g. `truncate(x / y) *
-y + truncate_mod(x, y) = x`.
-
-*NOTE*: `Mod` supports broadcasting. More about broadcasting
-[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-END
-}
-op {
- graph_op_name: "Mul"
- endpoint {
- name: "Mul"
- }
- summary: "Returns x * y element-wise."
- description: <<END
-*NOTE*: `Mul` supports broadcasting. More about broadcasting
-[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-END
-}
-op {
- graph_op_name: "Multinomial"
- endpoint {
- name: "Multinomial"
- }
- summary: "Draws samples from a multinomial distribution."
-}
-op {
- graph_op_name: "MutableDenseHashTable"
- endpoint {
- name: "MutableDenseHashTable"
- }
- summary: "Creates an empty hash table that uses tensors as the backing store."
- description: <<END
-It uses "open addressing" with quadratic reprobing to resolve
-collisions.
-
-This op creates a mutable hash table, specifying the type of its keys and
-values. Each value must be a scalar. Data can be inserted into the table using
-the insert operations. It does not support the initialization operation.
-END
-}
-op {
- graph_op_name: "MutableDenseHashTableV2"
- endpoint {
- name: "MutableDenseHashTableV2"
- }
- summary: "Creates an empty hash table that uses tensors as the backing store."
- description: <<END
-It uses "open addressing" with quadratic reprobing to resolve
-collisions.
-
-This op creates a mutable hash table, specifying the type of its keys and
-values. Each value must be a scalar. Data can be inserted into the table using
-the insert operations. It does not support the initialization operation.
-END
-}
-op {
- graph_op_name: "MutableHashTable"
- endpoint {
- name: "MutableHashTable"
- }
- summary: "Creates an empty hash table."
- description: <<END
-This op creates a mutable hash table, specifying the type of its keys and
-values. Each value must be a scalar. Data can be inserted into the table using
-the insert operations. It does not support the initialization operation.
-END
-}
-op {
- graph_op_name: "MutableHashTableOfTensors"
- endpoint {
- name: "MutableHashTableOfTensors"
- }
- summary: "Creates an empty hash table."
- description: <<END
-This op creates a mutable hash table, specifying the type of its keys and
-values. Each value must be a vector. Data can be inserted into the table using
-the insert operations. It does not support the initialization operation.
-END
-}
-op {
- graph_op_name: "MutableHashTableOfTensorsV2"
- endpoint {
- name: "MutableHashTableOfTensorsV2"
- }
- summary: "Creates an empty hash table."
- description: <<END
-This op creates a mutable hash table, specifying the type of its keys and
-values. Each value must be a vector. Data can be inserted into the table using
-the insert operations. It does not support the initialization operation.
-END
-}
-op {
- graph_op_name: "MutableHashTableV2"
- endpoint {
- name: "MutableHashTableV2"
- }
- summary: "Creates an empty hash table."
- description: <<END
-This op creates a mutable hash table, specifying the type of its keys and
-values. Each value must be a scalar. Data can be inserted into the table using
-the insert operations. It does not support the initialization operation.
-END
-}
diff --git a/tensorflow/core/api_def/base_api/api_def_MakeIterator.pbtxt b/tensorflow/core/api_def/base_api/api_def_MakeIterator.pbtxt
new file mode 100644
index 0000000000..921ea86a4b
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MakeIterator.pbtxt
@@ -0,0 +1,8 @@
+op {
+ graph_op_name: "MakeIterator"
+ summary: "Makes a new iterator from the given `dataset` and stores it in `iterator`."
+ description: <<END
+This operation may be executed multiple times. Each execution will reset the
+iterator in `iterator` to the first element of `dataset`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_MapAndBatchDataset.pbtxt b/tensorflow/core/api_def/base_api/api_def_MapAndBatchDataset.pbtxt
new file mode 100644
index 0000000000..bf544703de
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MapAndBatchDataset.pbtxt
@@ -0,0 +1,26 @@
+op {
+ graph_op_name: "MapAndBatchDataset"
+ in_arg {
+ name: "batch_size"
+ description: <<END
+A scalar representing the number of elements to accumulate in a
+batch. It determines the number of concurrent invocations of `f` that process
+elements from `input_dataset` in parallel.
+END
+ }
+ in_arg {
+ name: "num_parallel_batches"
+ description: <<END
+A scalar representing the number of batches to create in
+parallel. Processing multiple batches in parallel benefits workloads prone to
+stragglers.
+END
+ }
+ summary: "Creates a dataset that applies `f` to the outputs of `input_dataset` and then"
+ description: <<END
+batches `batch_size` of them.
+
+Unlike a "MapDataset", which applies `f` sequentially, this dataset invokes up
+to `batch_size * num_parallel_batches` copies of `f` in parallel.
+END
+}
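Setting the parallelism aside, the fused op produces the same elements as a `map` followed by a `batch`. A minimal sketch with the Python `tf.data` API (assuming a TensorFlow build that ships `tf.data`):

```
import tensorflow as tf

# MapAndBatchDataset fuses these two stages; the fused kernel may run up to
# batch_size * num_parallel_batches invocations of f concurrently.
dataset = tf.data.Dataset.range(10)
dataset = dataset.map(lambda x: x * 2)  # f
dataset = dataset.batch(4)              # batch_size = 4
```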
diff --git a/tensorflow/core/api_def/base_api/api_def_MapClear.pbtxt b/tensorflow/core/api_def/base_api/api_def_MapClear.pbtxt
new file mode 100644
index 0000000000..6c3c2d48b0
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MapClear.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "MapClear"
+ summary: "Op removes all elements in the underlying container."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_MapDataset.pbtxt b/tensorflow/core/api_def/base_api/api_def_MapDataset.pbtxt
new file mode 100644
index 0000000000..76d63ec247
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MapDataset.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "MapDataset"
+ summary: "Creates a dataset that applies `f` to the outputs of `input_dataset`."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_MapIncompleteSize.pbtxt b/tensorflow/core/api_def/base_api/api_def_MapIncompleteSize.pbtxt
new file mode 100644
index 0000000000..bd63305ac2
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MapIncompleteSize.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "MapIncompleteSize"
+ summary: "Op returns the number of incomplete elements in the underlying container."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_MapPeek.pbtxt b/tensorflow/core/api_def/base_api/api_def_MapPeek.pbtxt
new file mode 100644
index 0000000000..80eb6d5943
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MapPeek.pbtxt
@@ -0,0 +1,8 @@
+op {
+ graph_op_name: "MapPeek"
+ summary: "Op peeks at the values at the specified key. If the"
+ description: <<END
+underlying container does not contain this key
+this op will block until it does.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_MapSize.pbtxt b/tensorflow/core/api_def/base_api/api_def_MapSize.pbtxt
new file mode 100644
index 0000000000..9412019f59
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MapSize.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "MapSize"
+ summary: "Op returns the number of elements in the underlying container."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_MapStage.pbtxt b/tensorflow/core/api_def/base_api/api_def_MapStage.pbtxt
new file mode 100644
index 0000000000..555fe538ef
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MapStage.pbtxt
@@ -0,0 +1,37 @@
+op {
+ graph_op_name: "MapStage"
+ in_arg {
+ name: "key"
+ description: <<END
+An `int64` key.
+END
+ }
+ in_arg {
+ name: "values"
+ description: <<END
+A list of tensors to insert into the underlying container. Their data
+types must match the `dtypes` attr.
+END
+ }
+ attr {
+ name: "capacity"
+ description: <<END
+Maximum number of elements in the Staging Area. If > 0, inserts
+on the container will block when the capacity is reached.
+END
+ }
+ attr {
+ name: "container"
+ description: <<END
+If non-empty, this staging area is placed in the given container. Otherwise,
+a default container is used.
+END
+ }
+ attr {
+ name: "shared_name"
+ description: <<END
+This name must match the name used by the matching unstage op.
+END
+ }
+ summary: "Stage (key, values) in the underlying container which behaves like a hashtable."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_MapUnstage.pbtxt b/tensorflow/core/api_def/base_api/api_def_MapUnstage.pbtxt
new file mode 100644
index 0000000000..29a10cf928
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MapUnstage.pbtxt
@@ -0,0 +1,8 @@
+op {
+ graph_op_name: "MapUnstage"
+ summary: "Op removes and returns the values associated with the key"
+ description: <<END
+from the underlying container. If the underlying container
+does not contain this key, the op will block until it does.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_MapUnstageNoKey.pbtxt b/tensorflow/core/api_def/base_api/api_def_MapUnstageNoKey.pbtxt
new file mode 100644
index 0000000000..b9da7e65d7
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MapUnstageNoKey.pbtxt
@@ -0,0 +1,8 @@
+op {
+ graph_op_name: "MapUnstageNoKey"
+ summary: "Op removes and returns a random (key, value)"
+ description: <<END
+from the underlying container. If the underlying container
+does not contain elements, the op will block until it does.
+END
+}
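The stage/peek/unstage semantics described for `MapStage`, `MapPeek`, and `MapUnstage` amount to a hash table whose readers block until the requested key exists. A toy Python model of that contract (purely illustrative; not TensorFlow's implementation):

```
import threading

class BlockingMap:
    def __init__(self):
        self._data = {}
        self._cond = threading.Condition()

    def stage(self, key, values):          # MapStage
        with self._cond:
            self._data[key] = values
            self._cond.notify_all()

    def peek(self, key):                   # MapPeek: blocks until key exists
        with self._cond:
            while key not in self._data:
                self._cond.wait()
            return self._data[key]

    def unstage(self, key):                # MapUnstage: blocking remove
        with self._cond:
            while key not in self._data:
                self._cond.wait()
            return self._data.pop(key)
```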
diff --git a/tensorflow/core/api_def/base_api/api_def_MatMul.pbtxt b/tensorflow/core/api_def/base_api/api_def_MatMul.pbtxt
new file mode 100644
index 0000000000..bdc55e81ae
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MatMul.pbtxt
@@ -0,0 +1,25 @@
+op {
+ graph_op_name: "MatMul"
+ attr {
+ name: "transpose_a"
+ description: <<END
+If true, "a" is transposed before multiplication.
+END
+ }
+ attr {
+ name: "transpose_b"
+ description: <<END
+If true, "b" is transposed before multiplication.
+END
+ }
+ summary: "Multiply the matrix \"a\" by the matrix \"b\"."
+ description: <<END
+The inputs must be two-dimensional matrices and the inner dimension of
+"a" (after being transposed if transpose_a is true) must match the
+outer dimension of "b" (after being transposed if transposed_b is
+true).
+
+*Note*: The default kernel implementation for MatMul on GPUs uses
+cublas.
+END
+}
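As a quick illustration of the inner/outer dimension rule above, here is a NumPy sketch (NumPy stands in for the kernel; the shapes are made up for the example):

```
import numpy as np

a = np.arange(6.0).reshape(2, 3)   # shape [2, 3]
b = np.arange(12.0).reshape(4, 3)  # shape [4, 3]

# With transpose_b=true, the inner dimension of "a" (3) must match the
# outer dimension of "b" after transposition (3); the product is [2, 4].
product = a @ b.T
assert product.shape == (2, 4)
```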
diff --git a/tensorflow/core/api_def/base_api/api_def_MatchingFiles.pbtxt b/tensorflow/core/api_def/base_api/api_def_MatchingFiles.pbtxt
new file mode 100644
index 0000000000..8da76684e5
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MatchingFiles.pbtxt
@@ -0,0 +1,20 @@
+op {
+ graph_op_name: "MatchingFiles"
+ in_arg {
+ name: "pattern"
+ description: <<END
+Shell wildcard pattern(s). Scalar or vector of type string.
+END
+ }
+ out_arg {
+ name: "filenames"
+ description: <<END
+A vector of matching filenames.
+END
+ }
+ summary: "Returns the set of files matching one or more glob patterns."
+ description: <<END
+Note that this routine only supports wildcard characters in the
+basename portion of the pattern, not in the directory portion.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_MatrixBandPart.pbtxt b/tensorflow/core/api_def/base_api/api_def_MatrixBandPart.pbtxt
new file mode 100644
index 0000000000..eaf3d28437
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MatrixBandPart.pbtxt
@@ -0,0 +1,71 @@
+op {
+ graph_op_name: "MatrixBandPart"
+ in_arg {
+ name: "input"
+ description: <<END
+Rank `k` tensor.
+END
+ }
+ in_arg {
+ name: "num_lower"
+ description: <<END
+0-D tensor. Number of subdiagonals to keep. If negative, keep entire
+lower triangle.
+END
+ }
+ in_arg {
+ name: "num_upper"
+ description: <<END
+0-D tensor. Number of superdiagonals to keep. If negative, keep
+entire upper triangle.
+END
+ }
+ out_arg {
+ name: "band"
+ description: <<END
+Rank `k` tensor of the same shape as input. The extracted banded tensor.
+END
+ }
+ summary: "Copy a tensor setting everything outside a central band in each innermost matrix"
+ description: <<END
+to zero.
+
+The `band` part is computed as follows:
+Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a
+tensor with the same shape where
+
+`band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`.
+
+The indicator function
+
+`in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower) &&
+ (num_upper < 0 || (n-m) <= num_upper)`.
+
+For example:
+
+```
+# if 'input' is [[ 0, 1, 2, 3]
+ [-1, 0, 1, 2]
+ [-2, -1, 0, 1]
+ [-3, -2, -1, 0]],
+
+tf.matrix_band_part(input, 1, -1) ==> [[ 0, 1, 2, 3]
+ [-1, 0, 1, 2]
+ [ 0, -1, 0, 1]
+ [ 0, 0, -1, 0]],
+
+tf.matrix_band_part(input, 2, 1) ==> [[ 0, 1, 0, 0]
+ [-1, 0, 1, 0]
+ [-2, -1, 0, 1]
+ [ 0, -2, -1, 0]]
+```
+
+Useful special cases:
+
+```
+ tf.matrix_band_part(input, 0, -1) ==> Upper triangular part.
+ tf.matrix_band_part(input, -1, 0) ==> Lower triangular part.
+ tf.matrix_band_part(input, 0, 0) ==> Diagonal.
+```
+END
+}
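The `in_band` indicator above translates directly into a NumPy sketch (illustrative only; it handles a single matrix, whereas the op applies this to each innermost matrix):

```
import numpy as np

def matrix_band_part(x, num_lower, num_upper):
    # m is the row index, n the column index of each entry.
    m, n = np.indices(x.shape)
    in_band = ((num_lower < 0) | ((m - n) <= num_lower)) & \
              ((num_upper < 0) | ((n - m) <= num_upper))
    return np.where(in_band, x, 0)

x = np.array([[ 0,  1,  2,  3],
              [-1,  0,  1,  2],
              [-2, -1,  0,  1],
              [-3, -2, -1,  0]])
# Matches the first example above: one subdiagonal, full upper triangle.
print(matrix_band_part(x, 1, -1))
```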
diff --git a/tensorflow/core/api_def/base_api/api_def_MatrixDeterminant.pbtxt b/tensorflow/core/api_def/base_api/api_def_MatrixDeterminant.pbtxt
new file mode 100644
index 0000000000..0acfee2a30
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MatrixDeterminant.pbtxt
@@ -0,0 +1,21 @@
+op {
+ graph_op_name: "MatrixDeterminant"
+ in_arg {
+ name: "input"
+ description: <<END
+Shape is `[..., M, M]`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Shape is `[...]`.
+END
+ }
+ summary: "Computes the determinant of one or more square matrices."
+ description: <<END
+The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
+form square matrices. The output is a tensor containing the determinants
+for all input submatrices `[..., :, :]`.
+END
+}
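The batching behavior matches `np.linalg.det`, which is a convenient reference point (values made up for the example):

```
import numpy as np

x = np.array([[[1.0, 2.0], [3.0, 4.0]],
              [[2.0, 0.0], [0.0, 2.0]]])  # shape [2, 2, 2]

# One determinant per innermost matrix: shape [2], values [-2., 4.].
print(np.linalg.det(x))
```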
diff --git a/tensorflow/core/api_def/base_api/api_def_MatrixDiag.pbtxt b/tensorflow/core/api_def/base_api/api_def_MatrixDiag.pbtxt
new file mode 100644
index 0000000000..59f8902d54
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MatrixDiag.pbtxt
@@ -0,0 +1,44 @@
+op {
+ graph_op_name: "MatrixDiag"
+ in_arg {
+ name: "diagonal"
+ description: <<END
+Rank `k`, where `k >= 1`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`.
+END
+ }
+ summary: "Returns a batched diagonal tensor with a given batched diagonal values."
+ description: <<END
+Given a `diagonal`, this operation returns a tensor with the `diagonal` and
+everything else padded with zeros. The diagonal is computed as follows:
+
+Assume `diagonal` has `k` dimensions `[I, J, K, ..., N]`, then the output is a
+tensor of rank `k+1` with dimensions `[I, J, K, ..., N, N]` where:
+
+`output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`.
+
+For example:
+
+```
+# 'diagonal' is [[1, 2, 3, 4], [5, 6, 7, 8]]
+
+and diagonal.shape = (2, 4)
+
+tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0]
+ [0, 2, 0, 0]
+ [0, 0, 3, 0]
+ [0, 0, 0, 4]],
+ [[5, 0, 0, 0]
+ [0, 6, 0, 0]
+ [0, 0, 7, 0]
+ [0, 0, 0, 8]]]
+
+which has shape (2, 4, 4)
+```
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_MatrixDiagPart.pbtxt b/tensorflow/core/api_def/base_api/api_def_MatrixDiagPart.pbtxt
new file mode 100644
index 0000000000..2c2dbc7f26
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MatrixDiagPart.pbtxt
@@ -0,0 +1,47 @@
+op {
+ graph_op_name: "MatrixDiagPart"
+ in_arg {
+ name: "input"
+ description: <<END
+Rank `k` tensor where `k >= 2`.
+END
+ }
+ out_arg {
+ name: "diagonal"
+ description: <<END
+The extracted diagonal(s) having shape
+`diagonal.shape = input.shape[:-2] + [min(input.shape[-2:])]`.
+END
+ }
+ summary: "Returns the batched diagonal part of a batched tensor."
+ description: <<END
+This operation returns a tensor with the `diagonal` part
+of the batched `input`. The `diagonal` part is computed as follows:
+
+Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a
+tensor of rank `k - 1` with dimensions `[I, J, K, ..., min(M, N)]` where:
+
+`diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`.
+
+The input must be at least a matrix.
+
+For example:
+
+```
+# 'input' is [[[1, 0, 0, 0]
+ [0, 2, 0, 0]
+ [0, 0, 3, 0]
+ [0, 0, 0, 4]],
+ [[5, 0, 0, 0]
+ [0, 6, 0, 0]
+ [0, 0, 7, 0]
+ [0, 0, 0, 8]]]
+
+and input.shape = (2, 4, 4)
+
+tf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]]
+
+which has shape (2, 4)
+```
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_MatrixInverse.pbtxt b/tensorflow/core/api_def/base_api/api_def_MatrixInverse.pbtxt
new file mode 100644
index 0000000000..25eca0c766
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MatrixInverse.pbtxt
@@ -0,0 +1,33 @@
+op {
+ graph_op_name: "MatrixInverse"
+ in_arg {
+ name: "input"
+ description: <<END
+Shape is `[..., M, M]`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Shape is `[..., M, M]`.
+
+@compatibility(numpy)
+Equivalent to np.linalg.inv
+@end_compatibility
+END
+ }
+ summary: "Computes the inverse of one or more square invertible matrices or their"
+ description: <<END
+adjoints (conjugate transposes).
+
+The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
+form square matrices. The output is a tensor of the same shape as the input
+containing the inverse for all input submatrices `[..., :, :]`.
+
+The op uses LU decomposition with partial pivoting to compute the inverses.
+
+If a matrix is not invertible there is no guarantee what the op does. It
+may detect the condition and raise an exception or it may simply return a
+garbage result.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_MatrixSetDiag.pbtxt b/tensorflow/core/api_def/base_api/api_def_MatrixSetDiag.pbtxt
new file mode 100644
index 0000000000..5190902d7e
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MatrixSetDiag.pbtxt
@@ -0,0 +1,36 @@
+op {
+ graph_op_name: "MatrixSetDiag"
+ in_arg {
+ name: "input"
+ description: <<END
+Rank `k+1`, where `k >= 1`.
+END
+ }
+ in_arg {
+ name: "diagonal"
+ description: <<END
+Rank `k`, where `k >= 1`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Rank `k+1`, with `output.shape = input.shape`.
+END
+ }
+ summary: "Returns a batched matrix tensor with new batched diagonal values."
+ description: <<END
+Given `input` and `diagonal`, this operation returns a tensor with the
+same shape and values as `input`, except for the main diagonal of the
+innermost matrices. These will be overwritten by the values in `diagonal`.
+
+The output is computed as follows:
+
+Assume `input` has `k+1` dimensions `[I, J, K, ..., M, N]` and `diagonal` has
+`k` dimensions `[I, J, K, ..., min(M, N)]`. Then the output is a
+tensor of rank `k+1` with dimensions `[I, J, K, ..., M, N]` where:
+
+ * `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == n`.
+ * `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m != n`.
+END
+}
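The two-case rule above is a scatter onto the main diagonal; a NumPy sketch that also handles the batch dimensions (illustrative, not the TF kernel):

```
import numpy as np

def matrix_set_diag(matrix, diagonal):
    output = matrix.copy()
    n = min(matrix.shape[-2:])           # min(M, N)
    idx = np.arange(n)
    output[..., idx, idx] = diagonal     # overwrite only where m == n
    return output

a = np.zeros((3, 4))
print(matrix_set_diag(a, np.array([1.0, 2.0, 3.0])))
```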
diff --git a/tensorflow/core/api_def/base_api/api_def_MatrixSolve.pbtxt b/tensorflow/core/api_def/base_api/api_def_MatrixSolve.pbtxt
new file mode 100644
index 0000000000..d3b1216d40
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MatrixSolve.pbtxt
@@ -0,0 +1,37 @@
+op {
+ graph_op_name: "MatrixSolve"
+ in_arg {
+ name: "matrix"
+ description: <<END
+Shape is `[..., M, M]`.
+END
+ }
+ in_arg {
+ name: "rhs"
+ description: <<END
+Shape is `[..., M, K]`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Shape is `[..., M, K]`.
+END
+ }
+ attr {
+ name: "adjoint"
+ description: <<END
+Boolean indicating whether to solve with `matrix` or its (block-wise)
+adjoint.
+END
+ }
+ summary: "Solves systems of linear equations."
+ description: <<END
+`matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
+form square matrices. `rhs` is a tensor of shape `[..., M, K]`. The `output` is
+a tensor of shape `[..., M, K]`. If `adjoint` is `False` then each output matrix
+satisfies `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.
+If `adjoint` is `True` then each output matrix satisfies
+`adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`.
+END
+}
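The two `adjoint` modes can be checked against NumPy on a single, non-batched system; `np.linalg.solve` plays the role of the op here (values made up):

```
import numpy as np

matrix = np.array([[2.0, 1.0],
                   [0.0, 3.0]])
rhs = np.array([[3.0],
                [9.0]])

x = np.linalg.solve(matrix, rhs)               # adjoint = False
x_adj = np.linalg.solve(matrix.conj().T, rhs)  # adjoint = True

assert np.allclose(matrix @ x, rhs)
assert np.allclose(matrix.conj().T @ x_adj, rhs)
```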
diff --git a/tensorflow/core/api_def/base_api/api_def_MatrixSolveLs.pbtxt b/tensorflow/core/api_def/base_api/api_def_MatrixSolveLs.pbtxt
new file mode 100644
index 0000000000..51d91399f8
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MatrixSolveLs.pbtxt
@@ -0,0 +1,68 @@
+op {
+ graph_op_name: "MatrixSolveLs"
+ in_arg {
+ name: "matrix"
+ description: <<END
+Shape is `[..., M, N]`.
+END
+ }
+ in_arg {
+ name: "rhs"
+ description: <<END
+Shape is `[..., M, K]`.
+END
+ }
+ in_arg {
+ name: "l2_regularizer"
+ description: <<END
+Scalar tensor.
+
+@compatibility(numpy)
+Equivalent to np.linalg.lstsq
+@end_compatibility
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Shape is `[..., N, K]`.
+END
+ }
+ summary: "Solves one or more linear least-squares problems."
+ description: <<END
+`matrix` is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions
+form real or complex matrices of size `[M, N]`. `rhs` is a tensor of the same
+type as `matrix` and shape `[..., M, K]`.
+The output is a tensor of shape `[..., N, K]` where each output matrix solves
+each of the equations
+`matrix[..., :, :]` * `output[..., :, :]` = `rhs[..., :, :]`
+in the least squares sense.
+
+We use the following notation for (complex) matrix and right-hand sides
+in the batch:
+
+`matrix`=\\(A \in \mathbb{C}^{m \times n}\\),
+`rhs`=\\(B \in \mathbb{C}^{m \times k}\\),
+`output`=\\(X \in \mathbb{C}^{n \times k}\\),
+`l2_regularizer`=\\(\lambda \in \mathbb{R}\\).
+
+If `fast` is `True`, then the solution is computed by solving the normal
+equations using Cholesky decomposition. Specifically, if \\(m \ge n\\) then
+\\(X = (A^H A + \lambda I)^{-1} A^H B\\), which solves the least-squares
+problem \\(X = \mathrm{argmin}_{Z \in \mathbb{C}^{n \times k} } ||A Z - B||_F^2 +
+\lambda ||Z||_F^2\\). If \\(m \lt n\\) then `output` is computed as
+\\(X = A^H (A A^H + \lambda I)^{-1} B\\), which (for \\(\lambda = 0\\)) is the
+minimum-norm solution to the under-determined linear system, i.e.
+\\(X = \mathrm{argmin}_{Z \in \mathbb{C}^{n \times k} } ||Z||_F^2 \\),
+subject to \\(A Z = B\\). Notice that the fast path is only numerically stable
+when \\(A\\) is numerically full rank and has a condition number
+\\(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach} } }\\) or \\(\lambda\\) is
+sufficiently large.
+
+If `fast` is `False` an algorithm based on the numerically robust complete
+orthogonal decomposition is used. This computes the minimum-norm
+least-squares solution, even when \\(A\\) is rank deficient. This path is
+typically 6-7 times slower than the fast path. If `fast` is `False` then
+`l2_regularizer` is ignored.
+END
+}
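For the overdetermined case (\\(m \ge n\\)), the fast path's normal-equations formula is short enough to sketch in NumPy (a reference sketch with made-up values, not the op's actual Cholesky-based kernel):

```
import numpy as np

def solve_ls_fast(a, b, l2_regularizer=0.0):
    # X = (A^H A + lambda I)^{-1} A^H B, assuming m >= n.
    n = a.shape[1]
    gram = a.conj().T @ a + l2_regularizer * np.eye(n)
    return np.linalg.solve(gram, a.conj().T @ b)

a = np.array([[1.0, 0.0], [1.0, 1.0], [1.0, 2.0]])  # m = 3, n = 2
b = np.array([[1.0], [2.0], [2.0]])
x = solve_ls_fast(a, b, l2_regularizer=0.1)
```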
diff --git a/tensorflow/core/api_def/base_api/api_def_MatrixTriangularSolve.pbtxt b/tensorflow/core/api_def/base_api/api_def_MatrixTriangularSolve.pbtxt
new file mode 100644
index 0000000000..a2bfcdc66e
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MatrixTriangularSolve.pbtxt
@@ -0,0 +1,57 @@
+op {
+ graph_op_name: "MatrixTriangularSolve"
+ in_arg {
+ name: "matrix"
+ description: <<END
+Shape is `[..., M, M]`.
+END
+ }
+ in_arg {
+ name: "rhs"
+ description: <<END
+Shape is `[..., M, K]`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Shape is `[..., M, K]`.
+END
+ }
+ attr {
+ name: "lower"
+ description: <<END
+Boolean indicating whether the innermost matrices in `matrix` are
+lower or upper triangular.
+END
+ }
+ attr {
+ name: "adjoint"
+ description: <<END
+Boolean indicating whether to solve with `matrix` or its (block-wise)
+adjoint.
+
+@compatibility(numpy)
+Equivalent to scipy.linalg.solve_triangular
+@end_compatibility
+END
+ }
+ summary: "Solves systems of linear equations with upper or lower triangular matrices by"
+ description: <<END
+backsubstitution.
+
+`matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form
+square matrices. If `lower` is `True` then the strictly upper triangular part
+of each inner-most matrix is assumed to be zero and not accessed.
+If `lower` is False then the strictly lower triangular part of each inner-most
+matrix is assumed to be zero and not accessed.
+`rhs` is a tensor of shape `[..., M, K]`.
+
+The output is a tensor of shape `[..., M, K]`. If `adjoint` is
+`False` then the innermost matrices in `output` satisfy matrix equations
+`matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.
+If `adjoint` is `True` then the innermost matrices in
+`output` satisfy matrix equations
+`adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`.
+END
+}
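Backsubstitution itself is only a few lines; the sketch below solves a single upper triangular system (the `lower = False`, `adjoint = False` case) and is illustrative rather than the op's kernel:

```
import numpy as np

def back_substitution(u, b):
    n = u.shape[0]
    x = np.zeros_like(b, dtype=float)
    for i in range(n - 1, -1, -1):
        # Only entries on and above the diagonal are read.
        x[i] = (b[i] - u[i, i + 1:] @ x[i + 1:]) / u[i, i]
    return x

u = np.array([[2.0, 1.0],
              [0.0, 3.0]])
b = np.array([4.0, 6.0])
assert np.allclose(u @ back_substitution(u, b), b)
```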
diff --git a/tensorflow/core/api_def/base_api/api_def_Max.pbtxt b/tensorflow/core/api_def/base_api/api_def_Max.pbtxt
new file mode 100644
index 0000000000..9a807d9f37
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Max.pbtxt
@@ -0,0 +1,42 @@
+op {
+ graph_op_name: "Max"
+ endpoint {
+ name: "Max"
+ }
+ endpoint {
+ name: "ReduceMax"
+ }
+ in_arg {
+ name: "input"
+ description: <<END
+The tensor to reduce.
+END
+ }
+ in_arg {
+ name: "reduction_indices"
+ rename_to: "axis"
+ description: <<END
+The dimensions to reduce. Must be in the range
+`[-rank(input), rank(input))`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+The reduced tensor.
+END
+ }
+ attr {
+ name: "keep_dims"
+ description: <<END
+If true, retain reduced dimensions with length 1.
+END
+ }
+ summary: "Computes the maximum of elements across dimensions of a tensor."
+ description: <<END
+Reduces `input` along the dimensions given in `reduction_indices`. Unless
+`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
+`reduction_indices`. If `keep_dims` is true, the reduced dimensions are
+retained with length 1.
+END
+}
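The `reduction_indices`/`keep_dims` behavior (shared by `Mean` and `Min` below) mirrors NumPy's `axis`/`keepdims`, which makes for a quick illustration:

```
import numpy as np

x = np.array([[1, 3, 2],
              [6, 5, 4]])

print(np.max(x, axis=1))                 # [3 6]; rank drops by one
print(np.max(x, axis=1, keepdims=True))  # [[3] [6]]; keep_dims analogue
```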
diff --git a/tensorflow/core/api_def/base_api/api_def_MaxPool.pbtxt b/tensorflow/core/api_def/base_api/api_def_MaxPool.pbtxt
new file mode 100644
index 0000000000..885bc1c279
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MaxPool.pbtxt
@@ -0,0 +1,45 @@
+op {
+ graph_op_name: "MaxPool"
+ in_arg {
+ name: "input"
+ description: <<END
+4-D input to pool over.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+The max pooled output tensor.
+END
+ }
+ attr {
+ name: "ksize"
+ description: <<END
+The size of the window for each dimension of the input tensor.
+END
+ }
+ attr {
+ name: "strides"
+ description: <<END
+The stride of the sliding window for each dimension of the
+input tensor.
+END
+ }
+ attr {
+ name: "padding"
+ description: <<END
+The type of padding algorithm to use.
+END
+ }
+ attr {
+ name: "data_format"
+ description: <<END
+Specify the data format of the input and output data. With the
+default format "NHWC", the data is stored in the order of:
+ [batch, in_height, in_width, in_channels].
+Alternatively, the format could be "NCHW", the data storage order of:
+ [batch, in_channels, in_height, in_width].
+END
+ }
+ summary: "Performs max pooling on the input."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_MaxPool3D.pbtxt b/tensorflow/core/api_def/base_api/api_def_MaxPool3D.pbtxt
new file mode 100644
index 0000000000..8f07ee5fc1
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MaxPool3D.pbtxt
@@ -0,0 +1,46 @@
+op {
+ graph_op_name: "MaxPool3D"
+ in_arg {
+ name: "input"
+ description: <<END
+Shape `[batch, depth, rows, cols, channels]` tensor to pool over.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+The max pooled output tensor.
+END
+ }
+ attr {
+ name: "ksize"
+ description: <<END
+1-D tensor of length 5. The size of the window for each dimension of
+the input tensor. Must have `ksize[0] = ksize[4] = 1`.
+END
+ }
+ attr {
+ name: "strides"
+ description: <<END
+1-D tensor of length 5. The stride of the sliding window for each
+dimension of `input`. Must have `strides[0] = strides[4] = 1`.
+END
+ }
+ attr {
+ name: "padding"
+ description: <<END
+The type of padding algorithm to use.
+END
+ }
+ attr {
+ name: "data_format"
+ description: <<END
+The data format of the input and output data. With the
+default format "NDHWC", the data is stored in the order of:
+ [batch, in_depth, in_height, in_width, in_channels].
+Alternatively, the format could be "NCDHW", the data storage order is:
+ [batch, in_channels, in_depth, in_height, in_width].
+END
+ }
+ summary: "Performs 3D max pooling on the input."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_MaxPool3DGrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_MaxPool3DGrad.pbtxt
new file mode 100644
index 0000000000..78c3c5f4bd
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MaxPool3DGrad.pbtxt
@@ -0,0 +1,52 @@
+op {
+ graph_op_name: "MaxPool3DGrad"
+ in_arg {
+ name: "orig_input"
+ description: <<END
+The original input tensor.
+END
+ }
+ in_arg {
+ name: "orig_output"
+ description: <<END
+The original output tensor.
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+Output backprop of shape `[batch, depth, rows, cols, channels]`.
+END
+ }
+ attr {
+ name: "ksize"
+ description: <<END
+1-D tensor of length 5. The size of the window for each dimension of
+the input tensor. Must have `ksize[0] = ksize[4] = 1`.
+END
+ }
+ attr {
+ name: "strides"
+ description: <<END
+1-D tensor of length 5. The stride of the sliding window for each
+dimension of `input`. Must have `strides[0] = strides[4] = 1`.
+END
+ }
+ attr {
+ name: "padding"
+ description: <<END
+The type of padding algorithm to use.
+END
+ }
+ attr {
+ name: "data_format"
+ description: <<END
+The data format of the input and output data. With the
+default format "NDHWC", the data is stored in the order of:
+ [batch, in_depth, in_height, in_width, in_channels].
+Alternatively, the format could be "NCDHW", the data storage order is:
+ [batch, in_channels, in_depth, in_height, in_width].
+END
+ }
+ summary: "Computes gradients of max pooling function."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_MaxPool3DGradGrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_MaxPool3DGradGrad.pbtxt
new file mode 100644
index 0000000000..7593e9a7fe
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MaxPool3DGradGrad.pbtxt
@@ -0,0 +1,58 @@
+op {
+ graph_op_name: "MaxPool3DGradGrad"
+ in_arg {
+ name: "orig_input"
+ description: <<END
+The original input tensor.
+END
+ }
+ in_arg {
+ name: "orig_output"
+ description: <<END
+The original output tensor.
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+Output backprop of shape `[batch, depth, rows, cols, channels]`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Gradients of gradients w.r.t. the input to `max_pool`.
+END
+ }
+ attr {
+ name: "ksize"
+ description: <<END
+1-D tensor of length 5. The size of the window for each dimension of
+the input tensor. Must have `ksize[0] = ksize[4] = 1`.
+END
+ }
+ attr {
+ name: "strides"
+ description: <<END
+1-D tensor of length 5. The stride of the sliding window for each
+dimension of `input`. Must have `strides[0] = strides[4] = 1`.
+END
+ }
+ attr {
+ name: "padding"
+ description: <<END
+The type of padding algorithm to use.
+END
+ }
+ attr {
+ name: "data_format"
+ description: <<END
+The data format of the input and output data. With the
+default format "NDHWC", the data is stored in the order of:
+ [batch, in_depth, in_height, in_width, in_channels].
+Alternatively, the format could be "NCDHW", the data storage order is:
+ [batch, in_channels, in_depth, in_height, in_width].
+END
+ }
+ summary: "Computes second-order gradients of the maxpooling function."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_MaxPoolGrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_MaxPoolGrad.pbtxt
new file mode 100644
index 0000000000..be3e1972a0
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MaxPoolGrad.pbtxt
@@ -0,0 +1,58 @@
+op {
+ graph_op_name: "MaxPoolGrad"
+ visibility: HIDDEN
+ in_arg {
+ name: "orig_input"
+ description: <<END
+The original input tensor.
+END
+ }
+ in_arg {
+ name: "orig_output"
+ description: <<END
+The original output tensor.
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+4-D. Gradients w.r.t. the output of `max_pool`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Gradients w.r.t. the input to `max_pool`.
+END
+ }
+ attr {
+ name: "ksize"
+ description: <<END
+The size of the window for each dimension of the input tensor.
+END
+ }
+ attr {
+ name: "strides"
+ description: <<END
+The stride of the sliding window for each dimension of the
+input tensor.
+END
+ }
+ attr {
+ name: "padding"
+ description: <<END
+The type of padding algorithm to use.
+END
+ }
+ attr {
+ name: "data_format"
+ description: <<END
+Specify the data format of the input and output data. With the
+default format "NHWC", the data is stored in the order of:
+ [batch, in_height, in_width, in_channels].
+Alternatively, the format could be "NCHW", the data storage order of:
+ [batch, in_channels, in_height, in_width].
+END
+ }
+ summary: "Computes gradients of the maxpooling function."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_MaxPoolGradGrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_MaxPoolGradGrad.pbtxt
new file mode 100644
index 0000000000..83f319001f
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MaxPoolGradGrad.pbtxt
@@ -0,0 +1,57 @@
+op {
+ graph_op_name: "MaxPoolGradGrad"
+ in_arg {
+ name: "orig_input"
+ description: <<END
+The original input tensor.
+END
+ }
+ in_arg {
+ name: "orig_output"
+ description: <<END
+The original output tensor.
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+4-D. Gradients of gradients w.r.t. the input of `max_pool`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Gradients of gradients w.r.t. the input to `max_pool`.
+END
+ }
+ attr {
+ name: "ksize"
+ description: <<END
+The size of the window for each dimension of the input tensor.
+END
+ }
+ attr {
+ name: "strides"
+ description: <<END
+The stride of the sliding window for each dimension of the
+input tensor.
+END
+ }
+ attr {
+ name: "padding"
+ description: <<END
+The type of padding algorithm to use.
+END
+ }
+ attr {
+ name: "data_format"
+ description: <<END
+Specify the data format of the input and output data. With the
+default format "NHWC", the data is stored in the order of:
+ [batch, in_height, in_width, in_channels].
+Alternatively, the format could be "NCHW", the data storage order of:
+ [batch, in_channels, in_height, in_width].
+END
+ }
+ summary: "Computes second-order gradients of the maxpooling function."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_MaxPoolGradGradV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_MaxPoolGradGradV2.pbtxt
new file mode 100644
index 0000000000..a55e02ac40
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MaxPoolGradGradV2.pbtxt
@@ -0,0 +1,57 @@
+op {
+ graph_op_name: "MaxPoolGradGradV2"
+ in_arg {
+ name: "orig_input"
+ description: <<END
+The original input tensor.
+END
+ }
+ in_arg {
+ name: "orig_output"
+ description: <<END
+The original output tensor.
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+4-D. Gradients of gradients w.r.t. the input of `max_pool`.
+END
+ }
+ in_arg {
+ name: "ksize"
+ description: <<END
+The size of the window for each dimension of the input tensor.
+END
+ }
+ in_arg {
+ name: "strides"
+ description: <<END
+The stride of the sliding window for each dimension of the
+input tensor.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Gradients of gradients w.r.t. the input to `max_pool`.
+END
+ }
+ attr {
+ name: "padding"
+ description: <<END
+The type of padding algorithm to use.
+END
+ }
+ attr {
+ name: "data_format"
+ description: <<END
+Specify the data format of the input and output data. With the
+default format "NHWC", the data is stored in the order of:
+ [batch, in_height, in_width, in_channels].
+Alternatively, the format could be "NCHW", the data storage order of:
+ [batch, in_channels, in_height, in_width].
+END
+ }
+ summary: "Computes second-order gradients of the maxpooling function."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_MaxPoolGradGradWithArgmax.pbtxt b/tensorflow/core/api_def/base_api/api_def_MaxPoolGradGradWithArgmax.pbtxt
new file mode 100644
index 0000000000..63c5604d60
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MaxPoolGradGradWithArgmax.pbtxt
@@ -0,0 +1,48 @@
+op {
+ graph_op_name: "MaxPoolGradGradWithArgmax"
+ in_arg {
+ name: "input"
+ description: <<END
+The original input.
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the
+input of `max_pool`.
+END
+ }
+ in_arg {
+ name: "argmax"
+ description: <<END
+The indices of the maximum values chosen for each output of `max_pool`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Gradients of gradients w.r.t. the input of `max_pool`.
+END
+ }
+ attr {
+ name: "ksize"
+ description: <<END
+The size of the window for each dimension of the input tensor.
+END
+ }
+ attr {
+ name: "strides"
+ description: <<END
+The stride of the sliding window for each dimension of the
+input tensor.
+END
+ }
+ attr {
+ name: "padding"
+ description: <<END
+The type of padding algorithm to use.
+END
+ }
+ summary: "Computes second-order gradients of the maxpooling function."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_MaxPoolGradV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_MaxPoolGradV2.pbtxt
new file mode 100644
index 0000000000..e72877bb32
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MaxPoolGradV2.pbtxt
@@ -0,0 +1,57 @@
+op {
+ graph_op_name: "MaxPoolGradV2"
+ in_arg {
+ name: "orig_input"
+ description: <<END
+The original input tensor.
+END
+ }
+ in_arg {
+ name: "orig_output"
+ description: <<END
+The original output tensor.
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+4-D. Gradients w.r.t. the output of `max_pool`.
+END
+ }
+ in_arg {
+ name: "ksize"
+ description: <<END
+The size of the window for each dimension of the input tensor.
+END
+ }
+ in_arg {
+ name: "strides"
+ description: <<END
+The stride of the sliding window for each dimension of the
+input tensor.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Gradients w.r.t. the input to `max_pool`.
+END
+ }
+ attr {
+ name: "padding"
+ description: <<END
+The type of padding algorithm to use.
+END
+ }
+ attr {
+ name: "data_format"
+ description: <<END
+Specify the data format of the input and output data. With the
+default format "NHWC", the data is stored in the order of:
+ [batch, in_height, in_width, in_channels].
+Alternatively, the format could be "NCHW", the data storage order of:
+ [batch, in_channels, in_height, in_width].
+END
+ }
+ summary: "Computes gradients of the maxpooling function."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_MaxPoolGradWithArgmax.pbtxt b/tensorflow/core/api_def/base_api/api_def_MaxPoolGradWithArgmax.pbtxt
new file mode 100644
index 0000000000..4ae503e79d
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MaxPoolGradWithArgmax.pbtxt
@@ -0,0 +1,49 @@
+op {
+ graph_op_name: "MaxPoolGradWithArgmax"
+ visibility: HIDDEN
+ in_arg {
+ name: "input"
+ description: <<END
+The original input.
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the
+output of `max_pool`.
+END
+ }
+ in_arg {
+ name: "argmax"
+ description: <<END
+The indices of the maximum values chosen for each output of `max_pool`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Gradients w.r.t. the input of `max_pool`.
+END
+ }
+ attr {
+ name: "ksize"
+ description: <<END
+The size of the window for each dimension of the input tensor.
+END
+ }
+ attr {
+ name: "strides"
+ description: <<END
+The stride of the sliding window for each dimension of the
+input tensor.
+END
+ }
+ attr {
+ name: "padding"
+ description: <<END
+The type of padding algorithm to use.
+END
+ }
+ summary: "Computes gradients of the maxpooling function."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_MaxPoolV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_MaxPoolV2.pbtxt
new file mode 100644
index 0000000000..51b1edff6f
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MaxPoolV2.pbtxt
@@ -0,0 +1,45 @@
+op {
+ graph_op_name: "MaxPoolV2"
+ in_arg {
+ name: "input"
+ description: <<END
+4-D input to pool over.
+END
+ }
+ in_arg {
+ name: "ksize"
+ description: <<END
+The size of the window for each dimension of the input tensor.
+END
+ }
+ in_arg {
+ name: "strides"
+ description: <<END
+The stride of the sliding window for each dimension of the
+input tensor.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+The max pooled output tensor.
+END
+ }
+ attr {
+ name: "padding"
+ description: <<END
+The type of padding algorithm to use.
+END
+ }
+ attr {
+ name: "data_format"
+ description: <<END
+Specify the data format of the input and output data. With the
+default format "NHWC", the data is stored in the order of:
+ [batch, in_height, in_width, in_channels].
+Alternatively, the format could be "NCHW", the data storage order of:
+ [batch, in_channels, in_height, in_width].
+END
+ }
+ summary: "Performs max pooling on the input."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_MaxPoolWithArgmax.pbtxt b/tensorflow/core/api_def/base_api/api_def_MaxPoolWithArgmax.pbtxt
new file mode 100644
index 0000000000..e717e57b50
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MaxPoolWithArgmax.pbtxt
@@ -0,0 +1,51 @@
+op {
+ graph_op_name: "MaxPoolWithArgmax"
+ in_arg {
+ name: "input"
+ description: <<END
+4-D with shape `[batch, height, width, channels]`. Input to pool over.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+The max pooled output tensor.
+END
+ }
+ out_arg {
+ name: "argmax"
+ description: <<END
+4-D. The flattened indices of the max values chosen for each output.
+END
+ }
+ attr {
+ name: "ksize"
+ description: <<END
+The size of the window for each dimension of the input tensor.
+END
+ }
+ attr {
+ name: "strides"
+ description: <<END
+The stride of the sliding window for each dimension of the
+input tensor.
+END
+ }
+ attr {
+ name: "padding"
+ description: <<END
+The type of padding algorithm to use.
+END
+ }
+ summary: "Performs max pooling on the input and outputs both max values and indices."
+ description: <<END
+The indices in `argmax` are flattened, so that a maximum value at position
+`[b, y, x, c]` becomes flattened index
+`((b * height + y) * width + x) * channels + c`.
+
+The indices returned are always in `[0, height) x [0, width)` before flattening,
+even if padding is involved and the mathematically correct answer is outside
+(either negative or too large). This is a bug, but fixing it is difficult to do
+in a safe backwards compatible way, especially due to flattening.
+END
+}
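The flattening rule above can be checked with a few lines of plain Python; `flatten_argmax_index` is an illustrative helper, not part of the op:

```python
# Hypothetical helper illustrating the argmax flattening rule for an NHWC
# input of shape [batch, height, width, channels].
def flatten_argmax_index(b, y, x, c, height, width, channels):
    return ((b * height + y) * width + x) * channels + c

# A maximum at position [1, 2, 3, 0] in a [2, 4, 4, 3] input flattens to
# ((1 * 4 + 2) * 4 + 3) * 3 + 0 = 81.
assert flatten_argmax_index(1, 2, 3, 0, height=4, width=4, channels=3) == 81
```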
diff --git a/tensorflow/core/api_def/base_api/api_def_Maximum.pbtxt b/tensorflow/core/api_def/base_api/api_def_Maximum.pbtxt
new file mode 100644
index 0000000000..e52ca3f45d
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Maximum.pbtxt
@@ -0,0 +1,8 @@
+op {
+ graph_op_name: "Maximum"
+ summary: "Returns the max of x and y (i.e. x > y ? x : y) element-wise."
+ description: <<END
+*NOTE*: `Maximum` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+}
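The broadcasting referenced by the note follows the usual NumPy rules, so a small NumPy sketch shows the semantics:

```python
import numpy as np

# A [2, 3] matrix and a scalar broadcast to a common [2, 3] shape.
x = np.array([[1, 5, 2],
              [4, 0, 6]])
print(np.maximum(x, 3))  # [[3 5 3]
                         #  [4 3 6]]
```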
diff --git a/tensorflow/core/api_def/base_api/api_def_Mean.pbtxt b/tensorflow/core/api_def/base_api/api_def_Mean.pbtxt
new file mode 100644
index 0000000000..7130162135
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Mean.pbtxt
@@ -0,0 +1,42 @@
+op {
+ graph_op_name: "Mean"
+ endpoint {
+ name: "Mean"
+ }
+ endpoint {
+ name: "ReduceMean"
+ }
+ in_arg {
+ name: "input"
+ description: <<END
+The tensor to reduce.
+END
+ }
+ in_arg {
+ name: "reduction_indices"
+ rename_to: "axis"
+ description: <<END
+The dimensions to reduce. Must be in the range
+`[-rank(input), rank(input))`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+The reduced tensor.
+END
+ }
+ attr {
+ name: "keep_dims"
+ description: <<END
+If true, retain reduced dimensions with length 1.
+END
+ }
+ summary: "Computes the mean of elements across dimensions of a tensor."
+ description: <<END
+Reduces `input` along the dimensions given in `reduction_indices`. Unless
+`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
+`reduction_indices`. If `keep_dims` is true, the reduced dimensions are
+retained with length 1.
+END
+}
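The `keep_dims` semantics mirror NumPy's `keepdims`, which makes for a compact illustration:

```python
import numpy as np

# Reducing a [2, 3] tensor along axis 1 drops that dimension unless the
# keepdims flag retains it with length 1.
x = np.array([[1., 2., 3.],
              [4., 5., 6.]])
print(np.mean(x, axis=1))                 # [2. 5.]      -> shape (2,)
print(np.mean(x, axis=1, keepdims=True))  # [[2.] [5.]]  -> shape (2, 1)
```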
diff --git a/tensorflow/core/api_def/base_api/api_def_Merge.pbtxt b/tensorflow/core/api_def/base_api/api_def_Merge.pbtxt
new file mode 100644
index 0000000000..130c384158
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Merge.pbtxt
@@ -0,0 +1,29 @@
+op {
+ graph_op_name: "Merge"
+ in_arg {
+ name: "inputs"
+ description: <<END
+The input tensors, exactly one of which will become available.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Will be set to the available input tensor.
+END
+ }
+ out_arg {
+ name: "value_index"
+ description: <<END
+The index of the chosen input tensor in `inputs`.
+END
+ }
+ summary: "Forwards the value of an available tensor from `inputs` to `output`."
+ description: <<END
+`Merge` waits for at least one of the tensors in `inputs` to become available.
+It is usually combined with `Switch` to implement branching.
+
+`Merge` forwards the first tensor to become available to `output`, and sets
+`value_index` to its index in `inputs`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_MergeSummary.pbtxt b/tensorflow/core/api_def/base_api/api_def_MergeSummary.pbtxt
new file mode 100644
index 0000000000..8259690184
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MergeSummary.pbtxt
@@ -0,0 +1,26 @@
+op {
+ graph_op_name: "MergeSummary"
+ in_arg {
+ name: "inputs"
+ description: <<END
+Can be of any shape. Each must contain serialized `Summary` protocol
+buffers.
+END
+ }
+ out_arg {
+ name: "summary"
+ description: <<END
+Scalar. Serialized `Summary` protocol buffer.
+END
+ }
+ summary: "Merges summaries."
+ description: <<END
+This op creates a
+[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
+protocol buffer that contains the union of all the values in the input
+summaries.
+
+When the Op is run, it reports an `InvalidArgument` error if multiple values
+in the summaries to merge use the same tag.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_MergeV2Checkpoints.pbtxt b/tensorflow/core/api_def/base_api/api_def_MergeV2Checkpoints.pbtxt
new file mode 100644
index 0000000000..88cc164eb1
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MergeV2Checkpoints.pbtxt
@@ -0,0 +1,33 @@
+op {
+ graph_op_name: "MergeV2Checkpoints"
+ in_arg {
+ name: "checkpoint_prefixes"
+ description: <<END
+Prefixes of V2 checkpoints to merge.
+END
+ }
+ in_arg {
+ name: "destination_prefix"
+ description: <<END
+Scalar. The desired final prefix. Allowed to be the same
+as one of the checkpoint_prefixes.
+END
+ }
+ attr {
+ name: "delete_old_dirs"
+ description: <<END
+If true, attempts to recursively delete the dirname of each path in the
+input checkpoint_prefixes.
+END
+ }
+ summary: "V2 format specific: merges the metadata files of sharded checkpoints. The"
+ description: <<END
+result is one logical checkpoint, with one physical metadata file and renamed
+data files.
+
+Intended for "grouping" multiple checkpoints in a sharded checkpoint setup.
+
+If delete_old_dirs is true, attempts to recursively delete the dirname of each
+path in the input checkpoint_prefixes. This is useful when those paths are
+non-user-facing temporary locations.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Mfcc.pbtxt b/tensorflow/core/api_def/base_api/api_def_Mfcc.pbtxt
new file mode 100644
index 0000000000..217a0367a5
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Mfcc.pbtxt
@@ -0,0 +1,51 @@
+op {
+ graph_op_name: "Mfcc"
+ in_arg {
+ name: "spectrogram"
+ description: <<END
+Typically produced by the Spectrogram op, with magnitude_squared
+set to true.
+END
+ }
+ in_arg {
+ name: "sample_rate"
+ description: <<END
+How many samples per second the source audio used.
+END
+ }
+ attr {
+ name: "upper_frequency_limit"
+ description: <<END
+The highest frequency to use when calculating the
+cepstrum.
+END
+ }
+ attr {
+ name: "lower_frequency_limit"
+ description: <<END
+The lowest frequency to use when calculating the
+cepstrum.
+END
+ }
+ attr {
+ name: "filterbank_channel_count"
+ description: <<END
+Resolution of the Mel bank used internally.
+END
+ }
+ attr {
+ name: "dct_coefficient_count"
+ description: <<END
+How many output channels to produce per time slice.
+END
+ }
+ summary: "Transforms a spectrogram into a form that\'s useful for speech recognition."
+ description: <<END
+Mel Frequency Cepstral Coefficients are a way of representing audio data that's
+been effective as an input feature for machine learning. They are created by
+taking the spectrum of a spectrogram (a 'cepstrum'), and discarding some of the
+higher frequencies that are less significant to the human ear. They have a long
+history in the speech recognition world, and https://en.wikipedia.org/wiki/Mel-frequency_cepstrum
+is a good resource to learn more.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Min.pbtxt b/tensorflow/core/api_def/base_api/api_def_Min.pbtxt
new file mode 100644
index 0000000000..0ddc865ab5
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Min.pbtxt
@@ -0,0 +1,42 @@
+op {
+ graph_op_name: "Min"
+ endpoint {
+ name: "Min"
+ }
+ endpoint {
+ name: "ReduceMin"
+ }
+ in_arg {
+ name: "input"
+ description: <<END
+The tensor to reduce.
+END
+ }
+ in_arg {
+ name: "reduction_indices"
+ rename_to: "axis"
+ description: <<END
+The dimensions to reduce. Must be in the range
+`[-rank(input), rank(input))`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+The reduced tensor.
+END
+ }
+ attr {
+ name: "keep_dims"
+ description: <<END
+If true, retain reduced dimensions with length 1.
+END
+ }
+ summary: "Computes the minimum of elements across dimensions of a tensor."
+ description: <<END
+Reduces `input` along the dimensions given in `reduction_indices`. Unless
+`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
+`reduction_indices`. If `keep_dims` is true, the reduced dimensions are
+retained with length 1.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Minimum.pbtxt b/tensorflow/core/api_def/base_api/api_def_Minimum.pbtxt
new file mode 100644
index 0000000000..d0997f1a5c
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Minimum.pbtxt
@@ -0,0 +1,8 @@
+op {
+ graph_op_name: "Minimum"
+ summary: "Returns the min of x and y (i.e. x < y ? x : y) element-wise."
+ description: <<END
+*NOTE*: `Minimum` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_MirrorPad.pbtxt b/tensorflow/core/api_def/base_api/api_def_MirrorPad.pbtxt
new file mode 100644
index 0000000000..6f738f72ce
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MirrorPad.pbtxt
@@ -0,0 +1,60 @@
+op {
+ graph_op_name: "MirrorPad"
+ in_arg {
+ name: "input"
+ description: <<END
+The input tensor to be padded.
+END
+ }
+ in_arg {
+ name: "paddings"
+ description: <<END
+A two-column matrix specifying the padding sizes. The number of
+rows must be the same as the rank of `input`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+The padded tensor.
+END
+ }
+ attr {
+ name: "mode"
+ description: <<END
+Either `REFLECT` or `SYMMETRIC`. In reflect mode the padded regions
+do not include the borders, while in symmetric mode the padded regions
+do include the borders. For example, if `input` is `[1, 2, 3]` and `paddings`
+is `[0, 2]`, then the output is `[1, 2, 3, 2, 1]` in reflect mode, and
+it is `[1, 2, 3, 3, 2]` in symmetric mode.
+END
+ }
+ summary: "Pads a tensor with mirrored values."
+ description: <<END
+This operation pads `input` with mirrored values according to the `paddings`
+you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is
+the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
+how many values to add before the contents of `input` in that dimension, and
+`paddings[D, 1]` indicates how many values to add after the contents of `input`
+in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater
+than `input.dim_size(D)` in `SYMMETRIC` mode, or `input.dim_size(D) - 1` in
+`REFLECT` mode (which excludes the border).
+
+The padded size of each dimension D of the output is:
+
+`paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
+
+For example:
+
+```
+# 't' is [[1, 2, 3], [4, 5, 6]].
+# 'paddings' is [[1, 1], [2, 2]].
+# 'mode' is SYMMETRIC.
+# rank of 't' is 2.
+pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2]
+ [2, 1, 1, 2, 3, 3, 2]
+ [5, 4, 4, 5, 6, 6, 5]
+ [5, 4, 4, 5, 6, 6, 5]]
+```
+END
+}
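`np.pad` supports the same two modes, so the REFLECT/SYMMETRIC semantics above can be reproduced directly (a sanity check, not the kernel):

```python
import numpy as np

t = np.array([[1, 2, 3],
              [4, 5, 6]])
print(np.pad(t, [[1, 1], [2, 2]], mode='symmetric'))
# [[2 1 1 2 3 3 2]
#  [2 1 1 2 3 3 2]
#  [5 4 4 5 6 6 5]
#  [5 4 4 5 6 6 5]]
print(np.pad([1, 2, 3], [[0, 2]], mode='reflect'))  # [1 2 3 2 1]
```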
diff --git a/tensorflow/core/api_def/base_api/api_def_MirrorPadGrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_MirrorPadGrad.pbtxt
new file mode 100644
index 0000000000..20db99a9d1
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MirrorPadGrad.pbtxt
@@ -0,0 +1,50 @@
+op {
+ graph_op_name: "MirrorPadGrad"
+ visibility: HIDDEN
+ in_arg {
+ name: "input"
+ description: <<END
+The input tensor to be folded.
+END
+ }
+ in_arg {
+ name: "paddings"
+ description: <<END
+A two-column matrix specifying the padding sizes. The number of
+rows must be the same as the rank of `input`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+The folded tensor.
+END
+ }
+ attr {
+ name: "mode"
+ description: <<END
+The mode used in the `MirrorPad` op.
+END
+ }
+ summary: "Gradient op for `MirrorPad` op. This op folds a mirror-padded tensor."
+ description: <<END
+This operation folds the padded areas of `input` produced by `MirrorPad`
+according to the `paddings` you specify. `paddings` must be the same as the
+`paddings` argument given to the corresponding `MirrorPad` op.
+
+The folded size of each dimension D of the output is:
+
+`input.dim_size(D) - paddings(D, 0) - paddings(D, 1)`
+
+For example:
+
+```
+# 't' is [[1, 2, 3], [4, 5, 6], [7, 8, 9]].
+# 'paddings' is [[0, 1], [0, 1]].
+# 'mode' is SYMMETRIC.
+# rank of 't' is 2.
+pad(t, paddings) ==> [[ 1, 5]
+ [11, 28]]
+```
+END
+}
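A minimal NumPy sketch of the SYMMETRIC fold, assuming the mirroring semantics described for `MirrorPad` (illustrative only, not the actual kernel):

```python
import numpy as np

def fold_symmetric(t, paddings):
    # Add each mirrored (padded) slice back onto the interior position it
    # was copied from, one axis at a time.
    out = np.asarray(t, dtype=float)
    for axis, (before, after) in enumerate(paddings):
        out = np.moveaxis(out, axis, 0)
        core = out[before:out.shape[0] - after].copy()
        if before:
            core[:before] += out[:before][::-1]
        if after:
            core[-after:] += out[out.shape[0] - after:][::-1]
        out = np.moveaxis(core, 0, axis)
    return out

t = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
print(fold_symmetric(t, [[0, 1], [0, 1]]))  # [[ 1.  5.] [11. 28.]]
```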
diff --git a/tensorflow/core/api_def/base_api/api_def_Mod.pbtxt b/tensorflow/core/api_def/base_api/api_def_Mod.pbtxt
new file mode 100644
index 0000000000..2a49ccff68
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Mod.pbtxt
@@ -0,0 +1,11 @@
+op {
+ graph_op_name: "Mod"
+ summary: "Returns element-wise remainder of division. This emulates C semantics in that"
+ description: <<END
+the result here is consistent with a truncating divide. E.g.
+`tf.truncatediv(x, y) * y + truncate_mod(x, y) = x`.
+
+*NOTE*: `Mod` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+}
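Python's `math.fmod` follows the same C-style truncating convention, which makes the identity easy to check (Python's own `%` is floored and differs for negative operands):

```python
import math

x, y = -7, 3
print(math.fmod(x, y))  # -1.0  (truncating remainder, as this op computes)
print(x % y)            #  2    (Python's floored remainder differs)
# The identity from above: trunc_div(x, y) * y + trunc_mod(x, y) == x
print(math.trunc(x / y) * y + math.fmod(x, y))  # -7.0
```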
diff --git a/tensorflow/core/api_def/base_api/api_def_Mul.pbtxt b/tensorflow/core/api_def/base_api/api_def_Mul.pbtxt
new file mode 100644
index 0000000000..13fad871f3
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Mul.pbtxt
@@ -0,0 +1,14 @@
+op {
+ graph_op_name: "Mul"
+ endpoint {
+ name: "Multiply"
+ }
+ endpoint {
+ name: "Mul"
+ }
+ summary: "Returns x * y element-wise."
+ description: <<END
+*NOTE*: `Mul` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Multinomial.pbtxt b/tensorflow/core/api_def/base_api/api_def_Multinomial.pbtxt
new file mode 100644
index 0000000000..974e81e0fd
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Multinomial.pbtxt
@@ -0,0 +1,37 @@
+op {
+ graph_op_name: "Multinomial"
+ in_arg {
+ name: "logits"
+ description: <<END
+2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]`
+represents the unnormalized log probabilities for all classes.
+END
+ }
+ in_arg {
+ name: "num_samples"
+ description: <<END
+0-D. Number of independent samples to draw for each row slice.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+2-D Tensor with shape `[batch_size, num_samples]`. Each slice `[i, :]`
+contains the drawn class labels with range `[0, num_classes)`.
+END
+ }
+ attr {
+ name: "seed"
+ description: <<END
+If either seed or seed2 is set to be non-zero, the internal random number
+generator is seeded by the given seed. Otherwise, a random seed is used.
+END
+ }
+ attr {
+ name: "seed2"
+ description: <<END
+A second seed to avoid seed collision.
+END
+ }
+ summary: "Draws samples from a multinomial distribution."
+}
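A NumPy sketch of the sampling semantics, assuming each row of logits is softmaxed and classes are drawn independently (illustrative, not the kernel):

```python
import numpy as np

def multinomial(logits, num_samples):
    # Softmax each row of unnormalized log-probabilities, then draw
    # num_samples class ids per row.
    probs = np.exp(logits - logits.max(axis=1, keepdims=True))
    probs /= probs.sum(axis=1, keepdims=True)
    return np.stack([np.random.choice(len(p), size=num_samples, p=p)
                     for p in probs])

logits = np.log([[0.5, 0.25, 0.25]])       # batch_size=1, num_classes=3
print(multinomial(logits, num_samples=5))  # e.g. [[0 2 0 0 1]]
```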
diff --git a/tensorflow/core/api_def/base_api/api_def_MutableDenseHashTable.pbtxt b/tensorflow/core/api_def/base_api/api_def_MutableDenseHashTable.pbtxt
new file mode 100644
index 0000000000..eaaed081cc
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MutableDenseHashTable.pbtxt
@@ -0,0 +1,72 @@
+op {
+ graph_op_name: "MutableDenseHashTable"
+ visibility: SKIP
+ in_arg {
+ name: "empty_key"
+ description: <<END
+The key used to represent empty key buckets internally. Must not
+be used in insert or lookup operations.
+END
+ }
+ out_arg {
+ name: "table_handle"
+ description: <<END
+Handle to a table.
+END
+ }
+ attr {
+ name: "container"
+ description: <<END
+If non-empty, this table is placed in the given container.
+Otherwise, a default container is used.
+END
+ }
+ attr {
+ name: "shared_name"
+ description: <<END
+If non-empty, this table is shared under the given name across
+multiple sessions.
+END
+ }
+ attr {
+ name: "key_dtype"
+ description: <<END
+Type of the table keys.
+END
+ }
+ attr {
+ name: "value_dtype"
+ description: <<END
+Type of the table values.
+END
+ }
+ attr {
+ name: "value_shape"
+ description: <<END
+The shape of each value.
+END
+ }
+ attr {
+ name: "initial_num_buckets"
+ description: <<END
+The initial number of hash table buckets. Must be a power
+of 2.
+END
+ }
+ attr {
+ name: "max_load_factor"
+ description: <<END
+The maximum ratio between number of entries and number of
+buckets before growing the table. Must be between 0 and 1.
+END
+ }
+ summary: "Creates an empty hash table that uses tensors as the backing store."
+ description: <<END
+It uses "open addressing" with quadratic reprobing to resolve
+collisions.
+
+This op creates a mutable hash table, specifying the type of its keys and
+values. Each value must be a scalar. Data can be inserted into the table using
+the insert operations. It does not support the initialization operation.
+END
+}
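A toy sketch of open addressing with quadratic probing, the collision strategy named above (the real table stores tensor keys and values and grows per `max_load_factor`; this only illustrates the probe sequence):

```python
class ToyDenseTable:
    EMPTY = object()  # stand-in for the reserved empty_key

    def __init__(self, num_buckets=8):  # num_buckets must be a power of 2
        self.keys = [self.EMPTY] * num_buckets
        self.values = [None] * num_buckets

    def _probe(self, key):
        # Triangular-number (quadratic) probing; with a power-of-2 table
        # size this visits every bucket exactly once.
        mask = len(self.keys) - 1
        start = hash(key) & mask
        for i in range(len(self.keys)):
            yield (start + (i * i + i) // 2) & mask

    def insert(self, key, value):
        for b in self._probe(key):
            if self.keys[b] is self.EMPTY or self.keys[b] == key:
                self.keys[b], self.values[b] = key, value
                return
        raise RuntimeError('full; the real table grows instead')

    def find(self, key, default=None):
        for b in self._probe(key):
            if self.keys[b] is self.EMPTY:
                return default
            if self.keys[b] == key:
                return self.values[b]
        return default

t = ToyDenseTable()
t.insert('a', 1)
print(t.find('a'), t.find('b', -1))  # 1 -1
```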
diff --git a/tensorflow/core/api_def/base_api/api_def_MutableDenseHashTableV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_MutableDenseHashTableV2.pbtxt
new file mode 100644
index 0000000000..55fce83175
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MutableDenseHashTableV2.pbtxt
@@ -0,0 +1,74 @@
+op {
+ graph_op_name: "MutableDenseHashTableV2"
+ endpoint {
+ name: "MutableDenseHashTable"
+ }
+ in_arg {
+ name: "empty_key"
+ description: <<END
+The key used to represent empty key buckets internally. Must not
+be used in insert or lookup operations.
+END
+ }
+ out_arg {
+ name: "table_handle"
+ description: <<END
+Handle to a table.
+END
+ }
+ attr {
+ name: "container"
+ description: <<END
+If non-empty, this table is placed in the given container.
+Otherwise, a default container is used.
+END
+ }
+ attr {
+ name: "shared_name"
+ description: <<END
+If non-empty, this table is shared under the given name across
+multiple sessions.
+END
+ }
+ attr {
+ name: "key_dtype"
+ description: <<END
+Type of the table keys.
+END
+ }
+ attr {
+ name: "value_dtype"
+ description: <<END
+Type of the table values.
+END
+ }
+ attr {
+ name: "value_shape"
+ description: <<END
+The shape of each value.
+END
+ }
+ attr {
+ name: "initial_num_buckets"
+ description: <<END
+The initial number of hash table buckets. Must be a power
+of 2.
+END
+ }
+ attr {
+ name: "max_load_factor"
+ description: <<END
+The maximum ratio between number of entries and number of
+buckets before growing the table. Must be between 0 and 1.
+END
+ }
+ summary: "Creates an empty hash table that uses tensors as the backing store."
+ description: <<END
+It uses "open addressing" with quadratic reprobing to resolve
+collisions.
+
+This op creates a mutable hash table, specifying the type of its keys and
+values. Each value must be a scalar. Data can be inserted into the table using
+the insert operations. It does not support the initialization operation.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_MutableHashTable.pbtxt b/tensorflow/core/api_def/base_api/api_def_MutableHashTable.pbtxt
new file mode 100644
index 0000000000..4bcdcdaf8a
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MutableHashTable.pbtxt
@@ -0,0 +1,49 @@
+op {
+ graph_op_name: "MutableHashTable"
+ visibility: SKIP
+ out_arg {
+ name: "table_handle"
+ description: <<END
+Handle to a table.
+END
+ }
+ attr {
+ name: "container"
+ description: <<END
+If non-empty, this table is placed in the given container.
+Otherwise, a default container is used.
+END
+ }
+ attr {
+ name: "shared_name"
+ description: <<END
+If non-empty, this table is shared under the given name across
+multiple sessions.
+END
+ }
+ attr {
+ name: "use_node_name_sharing"
+ description: <<END
+If true and shared_name is empty, the table is shared
+using the node name.
+END
+ }
+ attr {
+ name: "key_dtype"
+ description: <<END
+Type of the table keys.
+END
+ }
+ attr {
+ name: "value_dtype"
+ description: <<END
+Type of the table values.
+END
+ }
+ summary: "Creates an empty hash table."
+ description: <<END
+This op creates a mutable hash table, specifying the type of its keys and
+values. Each value must be a scalar. Data can be inserted into the table using
+the insert operations. It does not support the initialization operation.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_MutableHashTableOfTensors.pbtxt b/tensorflow/core/api_def/base_api/api_def_MutableHashTableOfTensors.pbtxt
new file mode 100644
index 0000000000..9bb37a3c40
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MutableHashTableOfTensors.pbtxt
@@ -0,0 +1,42 @@
+op {
+ graph_op_name: "MutableHashTableOfTensors"
+ visibility: SKIP
+ out_arg {
+ name: "table_handle"
+ description: <<END
+Handle to a table.
+END
+ }
+ attr {
+ name: "container"
+ description: <<END
+If non-empty, this table is placed in the given container.
+Otherwise, a default container is used.
+END
+ }
+ attr {
+ name: "shared_name"
+ description: <<END
+If non-empty, this table is shared under the given name across
+multiple sessions.
+END
+ }
+ attr {
+ name: "key_dtype"
+ description: <<END
+Type of the table keys.
+END
+ }
+ attr {
+ name: "value_dtype"
+ description: <<END
+Type of the table values.
+END
+ }
+ summary: "Creates an empty hash table."
+ description: <<END
+This op creates a mutable hash table, specifying the type of its keys and
+values. Each value must be a vector. Data can be inserted into the table using
+the insert operations. It does not support the initialization operation.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_MutableHashTableOfTensorsV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_MutableHashTableOfTensorsV2.pbtxt
new file mode 100644
index 0000000000..1007cc96c0
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MutableHashTableOfTensorsV2.pbtxt
@@ -0,0 +1,44 @@
+op {
+ graph_op_name: "MutableHashTableOfTensorsV2"
+ endpoint {
+ name: "MutableHashTableOfTensors"
+ }
+ out_arg {
+ name: "table_handle"
+ description: <<END
+Handle to a table.
+END
+ }
+ attr {
+ name: "container"
+ description: <<END
+If non-empty, this table is placed in the given container.
+Otherwise, a default container is used.
+END
+ }
+ attr {
+ name: "shared_name"
+ description: <<END
+If non-empty, this table is shared under the given name across
+multiple sessions.
+END
+ }
+ attr {
+ name: "key_dtype"
+ description: <<END
+Type of the table keys.
+END
+ }
+ attr {
+ name: "value_dtype"
+ description: <<END
+Type of the table values.
+END
+ }
+ summary: "Creates an empty hash table."
+ description: <<END
+This op creates a mutable hash table, specifying the type of its keys and
+values. Each value must be a vector. Data can be inserted into the table using
+the insert operations. It does not support the initialization operation.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_MutableHashTableV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_MutableHashTableV2.pbtxt
new file mode 100644
index 0000000000..0b37b5b07f
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_MutableHashTableV2.pbtxt
@@ -0,0 +1,51 @@
+op {
+ graph_op_name: "MutableHashTableV2"
+ endpoint {
+ name: "MutableHashTable"
+ }
+ out_arg {
+ name: "table_handle"
+ description: <<END
+Handle to a table.
+END
+ }
+ attr {
+ name: "container"
+ description: <<END
+If non-empty, this table is placed in the given container.
+Otherwise, a default container is used.
+END
+ }
+ attr {
+ name: "shared_name"
+ description: <<END
+If non-empty, this table is shared under the given name across
+multiple sessions.
+END
+ }
+ attr {
+ name: "use_node_name_sharing"
+ description: <<END
+If true and shared_name is empty, the table is shared
+using the node name.
+END
+ }
+ attr {
+ name: "key_dtype"
+ description: <<END
+Type of the table keys.
+END
+ }
+ attr {
+ name: "value_dtype"
+ description: <<END
+Type of the table values.
+END
+ }
+ summary: "Creates an empty hash table."
+ description: <<END
+This op creates a mutable hash table, specifying the type of its keys and
+values. Each value must be a scalar. Data can be inserted into the table using
+the insert operations. It does not support the initialization operation.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_N.pbtxt b/tensorflow/core/api_def/base_api/api_def_N.pbtxt
deleted file mode 100644
index 0298a42cab..0000000000
--- a/tensorflow/core/api_def/base_api/api_def_N.pbtxt
+++ /dev/null
@@ -1,94 +0,0 @@
-op {
- graph_op_name: "Neg"
- endpoint {
- name: "Neg"
- }
- summary: "Computes numerical negative value element-wise."
- description: <<END
-I.e., \\(y = -x\\).
-END
-}
-op {
- graph_op_name: "NegTrain"
- endpoint {
- name: "NegTrain"
- }
- summary: "Training via negative sampling."
-}
-op {
- graph_op_name: "NextIteration"
- endpoint {
- name: "NextIteration"
- }
- summary: "Makes its input available to the next iteration."
-}
-op {
- graph_op_name: "NoOp"
- endpoint {
- name: "NoOp"
- }
- summary: "Does nothing. Only useful as a placeholder for control edges."
-}
-op {
- graph_op_name: "NonMaxSuppression"
- endpoint {
- name: "NonMaxSuppression"
- }
- summary: "Greedily selects a subset of bounding boxes in descending order of score,"
- description: <<END
-pruning away boxes that have high intersection-over-union (IOU) overlap
-with previously selected boxes. Bounding boxes are supplied as
-[y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
-diagonal pair of box corners and the coordinates can be provided as normalized
-(i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm
-is agnostic to where the origin is in the coordinate system. Note that this
-algorithm is invariant to orthogonal transformations and translations
-of the coordinate system; thus translating or reflections of the coordinate
-system result in the same boxes being selected by the algorithm.
-The output of this operation is a set of integers indexing into the input
-collection of bounding boxes representing the selected boxes. The bounding
-box coordinates corresponding to the selected indices can then be obtained
-using the `tf.gather operation`. For example:
- selected_indices = tf.image.non_max_suppression(
- boxes, scores, max_output_size, iou_threshold)
- selected_boxes = tf.gather(boxes, selected_indices)
-END
-}
-op {
- graph_op_name: "NonMaxSuppressionV2"
- endpoint {
- name: "NonMaxSuppressionV2"
- }
- summary: "Greedily selects a subset of bounding boxes in descending order of score,"
- description: <<END
-pruning away boxes that have high intersection-over-union (IOU) overlap
-with previously selected boxes. Bounding boxes are supplied as
-[y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
-diagonal pair of box corners and the coordinates can be provided as normalized
-(i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm
-is agnostic to where the origin is in the coordinate system. Note that this
-algorithm is invariant to orthogonal transformations and translations
-of the coordinate system; thus translating or reflections of the coordinate
-system result in the same boxes being selected by the algorithm.
-
-The output of this operation is a set of integers indexing into the input
-collection of bounding boxes representing the selected boxes. The bounding
-box coordinates corresponding to the selected indices can then be obtained
-using the `tf.gather operation`. For example:
-
- selected_indices = tf.image.non_max_suppression_v2(
- boxes, scores, max_output_size, iou_threshold)
- selected_boxes = tf.gather(boxes, selected_indices)
-END
-}
-op {
- graph_op_name: "NotEqual"
- endpoint {
- name: "NotEqual"
- }
- summary: "Returns the truth value of (x != y) element-wise."
- description: <<END
-*NOTE*: `NotEqual` supports broadcasting. More about broadcasting
-[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-END
-}
diff --git a/tensorflow/core/api_def/base_api/api_def_Neg.pbtxt b/tensorflow/core/api_def/base_api/api_def_Neg.pbtxt
new file mode 100644
index 0000000000..dafa218e5a
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Neg.pbtxt
@@ -0,0 +1,13 @@
+op {
+ graph_op_name: "Neg"
+ endpoint {
+ name: "Negate"
+ }
+ endpoint {
+ name: "Neg"
+ }
+ summary: "Computes numerical negative value element-wise."
+ description: <<END
+I.e., \\(y = -x\\).
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_NegTrain.pbtxt b/tensorflow/core/api_def/base_api/api_def_NegTrain.pbtxt
new file mode 100644
index 0000000000..4c8efac053
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_NegTrain.pbtxt
@@ -0,0 +1,40 @@
+op {
+ graph_op_name: "NegTrain"
+ in_arg {
+ name: "w_in"
+ description: <<END
+input word embedding.
+END
+ }
+ in_arg {
+ name: "w_out"
+ description: <<END
+output word embedding.
+END
+ }
+ in_arg {
+ name: "examples"
+ description: <<END
+A vector of word ids.
+END
+ }
+ in_arg {
+ name: "labels"
+ description: <<END
+A vector of word ids.
+END
+ }
+ attr {
+ name: "vocab_count"
+ description: <<END
+Count of words in the vocabulary.
+END
+ }
+ attr {
+ name: "num_negative_samples"
+ description: <<END
+Number of negative samples per example.
+END
+ }
+ summary: "Training via negative sampling."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_NextIteration.pbtxt b/tensorflow/core/api_def/base_api/api_def_NextIteration.pbtxt
new file mode 100644
index 0000000000..13178619ef
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_NextIteration.pbtxt
@@ -0,0 +1,16 @@
+op {
+ graph_op_name: "NextIteration"
+ in_arg {
+ name: "data"
+ description: <<END
+The tensor to be made available to the next iteration.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+The same tensor as `data`.
+END
+ }
+ summary: "Makes its input available to the next iteration."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_NoOp.pbtxt b/tensorflow/core/api_def/base_api/api_def_NoOp.pbtxt
new file mode 100644
index 0000000000..d860149adc
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_NoOp.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "NoOp"
+ summary: "Does nothing. Only useful as a placeholder for control edges."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_NonMaxSuppression.pbtxt b/tensorflow/core/api_def/base_api/api_def_NonMaxSuppression.pbtxt
new file mode 100644
index 0000000000..c8352b1b8c
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_NonMaxSuppression.pbtxt
@@ -0,0 +1,56 @@
+op {
+ graph_op_name: "NonMaxSuppression"
+ in_arg {
+ name: "boxes"
+ description: <<END
+A 2-D float tensor of shape `[num_boxes, 4]`.
+END
+ }
+ in_arg {
+ name: "scores"
+ description: <<END
+A 1-D float tensor of shape `[num_boxes]` representing a single
+score corresponding to each box (each row of boxes).
+END
+ }
+ in_arg {
+ name: "max_output_size"
+ description: <<END
+A scalar integer tensor representing the maximum number of
+boxes to be selected by non max suppression.
+END
+ }
+ out_arg {
+ name: "selected_indices"
+ description: <<END
+A 1-D integer tensor of shape `[M]` representing the selected
+indices from the boxes tensor, where `M <= max_output_size`.
+END
+ }
+ attr {
+ name: "iou_threshold"
+ description: <<END
+A float representing the threshold for deciding whether boxes
+overlap too much with respect to IOU.
+END
+ }
+ summary: "Greedily selects a subset of bounding boxes in descending order of score,"
+ description: <<END
+pruning away boxes that have high intersection-over-union (IOU) overlap
+with previously selected boxes. Bounding boxes are supplied as
+[y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
+diagonal pair of box corners and the coordinates can be provided as normalized
+(i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm
+is agnostic to where the origin is in the coordinate system. Note that this
+algorithm is invariant to orthogonal transformations and translations
+of the coordinate system; thus translating or reflections of the coordinate
+system result in the same boxes being selected by the algorithm.
+The output of this operation is a set of integers indexing into the input
+collection of bounding boxes representing the selected boxes. The bounding
+box coordinates corresponding to the selected indices can then be obtained
+using the `tf.gather` operation. For example:
+ selected_indices = tf.image.non_max_suppression(
+ boxes, scores, max_output_size, iou_threshold)
+ selected_boxes = tf.gather(boxes, selected_indices)
+END
+}
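The greedy procedure above is short enough to sketch in NumPy (illustrative, not the TF kernel):

```python
import numpy as np

def iou(a, b):
    # Intersection-over-union of two [y1, x1, y2, x2] boxes.
    y1, x1 = max(a[0], b[0]), max(a[1], b[1])
    y2, x2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0.0, y2 - y1) * max(0.0, x2 - x1)
    area = lambda box: (box[2] - box[0]) * (box[3] - box[1])
    return inter / (area(a) + area(b) - inter)

def non_max_suppression(boxes, scores, max_output_size, iou_threshold):
    selected = []
    for i in np.argsort(scores)[::-1]:  # highest score first
        if len(selected) == max_output_size:
            break
        if all(iou(boxes[i], boxes[j]) <= iou_threshold for j in selected):
            selected.append(i)
    return np.array(selected)

boxes = np.array([[0., 0., 1., 1.], [0., 0.1, 1., 1.1], [0., 2., 1., 3.]])
scores = np.array([0.9, 0.8, 0.7])
keep = non_max_suppression(boxes, scores, max_output_size=2, iou_threshold=0.5)
print(keep, boxes[keep])  # [0 2] -- box 1 overlaps box 0 too heavily
```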
diff --git a/tensorflow/core/api_def/base_api/api_def_NonMaxSuppressionV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_NonMaxSuppressionV2.pbtxt
new file mode 100644
index 0000000000..42146d106c
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_NonMaxSuppressionV2.pbtxt
@@ -0,0 +1,58 @@
+op {
+ graph_op_name: "NonMaxSuppressionV2"
+ in_arg {
+ name: "boxes"
+ description: <<END
+A 2-D float tensor of shape `[num_boxes, 4]`.
+END
+ }
+ in_arg {
+ name: "scores"
+ description: <<END
+A 1-D float tensor of shape `[num_boxes]` representing a single
+score corresponding to each box (each row of boxes).
+END
+ }
+ in_arg {
+ name: "max_output_size"
+ description: <<END
+A scalar integer tensor representing the maximum number of
+boxes to be selected by non max suppression.
+END
+ }
+ in_arg {
+ name: "iou_threshold"
+ description: <<END
+A 0-D float tensor representing the threshold for deciding whether
+boxes overlap too much with respect to IOU.
+END
+ }
+ out_arg {
+ name: "selected_indices"
+ description: <<END
+A 1-D integer tensor of shape `[M]` representing the selected
+indices from the boxes tensor, where `M <= max_output_size`.
+END
+ }
+ summary: "Greedily selects a subset of bounding boxes in descending order of score,"
+ description: <<END
+pruning away boxes that have high intersection-over-union (IOU) overlap
+with previously selected boxes. Bounding boxes are supplied as
+[y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
+diagonal pair of box corners and the coordinates can be provided as normalized
+(i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm
+is agnostic to where the origin is in the coordinate system. Note that this
+algorithm is invariant to orthogonal transformations and translations
+of the coordinate system; thus translating or reflecting the coordinate
+system results in the same boxes being selected by the algorithm.
+
+The output of this operation is a set of integers indexing into the input
+collection of bounding boxes representing the selected boxes. The bounding
+box coordinates corresponding to the selected indices can then be obtained
+using the `tf.gather` operation. For example:
+
+ selected_indices = tf.image.non_max_suppression_v2(
+ boxes, scores, max_output_size, iou_threshold)
+ selected_boxes = tf.gather(boxes, selected_indices)
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_NotEqual.pbtxt b/tensorflow/core/api_def/base_api/api_def_NotEqual.pbtxt
new file mode 100644
index 0000000000..5c4b318534
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_NotEqual.pbtxt
@@ -0,0 +1,8 @@
+op {
+ graph_op_name: "NotEqual"
+ summary: "Returns the truth value of (x != y) element-wise."
+ description: <<END
+*NOTE*: `NotEqual` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_NthElement.pbtxt b/tensorflow/core/api_def/base_api/api_def_NthElement.pbtxt
new file mode 100644
index 0000000000..9ef20a26db
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_NthElement.pbtxt
@@ -0,0 +1,39 @@
+op {
+ graph_op_name: "NthElement"
+ in_arg {
+ name: "input"
+ description: <<END
+1-D or higher with last dimension at least `n+1`.
+END
+ }
+ in_arg {
+ name: "n"
+ description: <<END
+0-D. Position in sorted order of the value to select along the last dimension
+(along each row for matrices). Valid range of n is `[0, input.shape[-1])`.
+END
+ }
+ out_arg {
+ name: "values"
+ description: <<END
+The `n`-th order statistic along each slice of the last dimension.
+END
+ }
+ attr {
+ name: "reverse"
+ description: <<END
+When set to True, finds the nth-largest value in the vector instead of
+the nth-smallest.
+END
+ }
+ summary: "Finds values of the `n`-th order statistic for the last dmension."
+ description: <<END
+If the input is a vector (rank-1), finds the entry which is the nth-smallest
+value in the vector and outputs its value as a scalar tensor.
+
+For matrices (resp. higher rank input), computes the entry which is the
+nth-smallest value in each row (resp. vector along the last dimension). Thus,
+
+ values.shape = input.shape[:-1]
+END
+}
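`np.partition` computes the same order statistic along the last dimension, which is a convenient way to read the shape contract above:

```python
import numpy as np

x = np.array([[5, 1, 4],
              [2, 9, 0]])
n = 1
values = np.partition(x, n, axis=-1)[..., n]
print(values)        # [4 2]
print(values.shape)  # (2,) == x.shape[:-1]
```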
diff --git a/tensorflow/core/api_def/base_api/api_def_O.pbtxt b/tensorflow/core/api_def/base_api/api_def_O.pbtxt
deleted file mode 100644
index 3c62335da9..0000000000
--- a/tensorflow/core/api_def/base_api/api_def_O.pbtxt
+++ /dev/null
@@ -1,195 +0,0 @@
-op {
- graph_op_name: "OneHot"
- endpoint {
- name: "OneHot"
- }
- summary: "Returns a one-hot tensor."
- description: <<END
-The locations represented by indices in `indices` take value `on_value`,
-while all other locations take value `off_value`.
-
-If the input `indices` is rank `N`, the output will have rank `N+1`,
-The new axis is created at dimension `axis` (default: the new axis is
-appended at the end).
-
-If `indices` is a scalar the output shape will be a vector of length `depth`.
-
-If `indices` is a vector of length `features`, the output shape will be:
-```
- features x depth if axis == -1
- depth x features if axis == 0
-```
-
-If `indices` is a matrix (batch) with shape `[batch, features]`,
-the output shape will be:
-```
- batch x features x depth if axis == -1
- batch x depth x features if axis == 1
- depth x batch x features if axis == 0
-```
-
-
-Examples
-=========
-
-Suppose that
-
-```
- indices = [0, 2, -1, 1]
- depth = 3
- on_value = 5.0
- off_value = 0.0
- axis = -1
-```
-
-Then output is `[4 x 3]`:
-
- ```output =
- [5.0 0.0 0.0] // one_hot(0)
- [0.0 0.0 5.0] // one_hot(2)
- [0.0 0.0 0.0] // one_hot(-1)
- [0.0 5.0 0.0] // one_hot(1)
- ```
-
-Suppose that
-
-```
- indices = [0, 2, -1, 1]
- depth = 3
- on_value = 0.0
- off_value = 3.0
- axis = 0
-```
-
-Then output is `[3 x 4]`:
-
- ```output =
- [0.0 3.0 3.0 3.0]
- [3.0 3.0 3.0 0.0]
- [3.0 3.0 3.0 3.0]
- [3.0 0.0 3.0 3.0]
- // ^ one_hot(0)
- // ^ one_hot(2)
- // ^ one_hot(-1)
- // ^ one_hot(1)
- ```
-Suppose that
-
-```
- indices = [[0, 2], [1, -1]]
- depth = 3
- on_value = 1.0
- off_value = 0.0
- axis = -1
-```
-
-Then output is `[2 x 2 x 3]`:
-
- ```output =
- [
- [1.0, 0.0, 0.0] // one_hot(0)
- [0.0, 0.0, 1.0] // one_hot(2)
- ][
- [0.0, 1.0, 0.0] // one_hot(1)
- [0.0, 0.0, 0.0] // one_hot(-1)
- ]```
-END
-}
-op {
- graph_op_name: "OneShotIterator"
- endpoint {
- name: "OneShotIterator"
- }
- summary: "Makes a \"one-shot\" iterator that can be iterated only once."
- description: <<END
-A one-shot iterator bundles the logic for defining the dataset and
-the state of the iterator in a single op, which allows simple input
-pipelines to be defined without an additional initialization
-("MakeIterator") step.
-
-One-shot iterators have the following limitations:
-
-* They do not support parameterization: all logic for creating the underlying
- dataset must be bundled in the `dataset_factory` function.
-* They are not resettable. Once a one-shot iterator reaches the end of its
- underlying dataset, subsequent "IteratorGetNext" operations on that
- iterator will always produce an `OutOfRange` error.
-
-For greater flexibility, use "Iterator" and "MakeIterator" to define
-an iterator using an arbitrary subgraph, which may capture tensors
-(including fed values) as parameters, and which may be reset multiple
-times by rerunning "MakeIterator".
-END
-}
-op {
- graph_op_name: "OnesLike"
- endpoint {
- name: "OnesLike"
- }
- summary: "Returns a tensor of ones with the same shape and type as x."
-}
-op {
- graph_op_name: "OrderedMapClear"
- endpoint {
- name: "OrderedMapClear"
- }
- summary: "Op removes all elements in the underlying container."
-}
-op {
- graph_op_name: "OrderedMapIncompleteSize"
- endpoint {
- name: "OrderedMapIncompleteSize"
- }
- summary: "Op returns the number of incomplete elements in the underlying container."
-}
-op {
- graph_op_name: "OrderedMapPeek"
- endpoint {
- name: "OrderedMapPeek"
- }
- summary: "Op peeks at the values at the specified key. If the"
- description: <<END
-underlying container does not contain this key
-this op will block until it does. This Op is optimized for
-performance.
-END
-}
-op {
- graph_op_name: "OrderedMapSize"
- endpoint {
- name: "OrderedMapSize"
- }
- summary: "Op returns the number of elements in the underlying container."
-}
-op {
- graph_op_name: "OrderedMapStage"
- endpoint {
- name: "OrderedMapStage"
- }
- summary: "Stage (key, values) in the underlying container which behaves like a ordered"
- description: <<END
-associative container. Elements are ordered by key.
-END
-}
-op {
- graph_op_name: "OrderedMapUnstage"
- endpoint {
- name: "OrderedMapUnstage"
- }
- summary: "Op removes and returns the values associated with the key"
- description: <<END
-from the underlying container. If the underlying container
-does not contain this key, the op will block until it does.
-END
-}
-op {
- graph_op_name: "OrderedMapUnstageNoKey"
- endpoint {
- name: "OrderedMapUnstageNoKey"
- }
- summary: "Op removes and returns the (key, value) element with the smallest"
- description: <<END
-key from the underlying container. If the underlying container
-does not contain elements, the op will block until it does.
-END
-}
diff --git a/tensorflow/core/api_def/base_api/api_def_OneHot.pbtxt b/tensorflow/core/api_def/base_api/api_def_OneHot.pbtxt
new file mode 100644
index 0000000000..807b8ae310
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_OneHot.pbtxt
@@ -0,0 +1,130 @@
+op {
+ graph_op_name: "OneHot"
+ in_arg {
+ name: "indices"
+ description: <<END
+A tensor of indices.
+END
+ }
+ in_arg {
+ name: "depth"
+ description: <<END
+A scalar defining the depth of the one hot dimension.
+END
+ }
+ in_arg {
+ name: "on_value"
+ description: <<END
+A scalar defining the value to fill in output when `indices[j] = i`.
+END
+ }
+ in_arg {
+ name: "off_value"
+ description: <<END
+A scalar defining the value to fill in output when `indices[j] != i`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+The one-hot tensor.
+END
+ }
+ attr {
+ name: "axis"
+ description: <<END
+The axis to fill (default: -1, a new inner-most axis).
+END
+ }
+ summary: "Returns a one-hot tensor."
+ description: <<END
+The locations represented by indices in `indices` take value `on_value`,
+while all other locations take value `off_value`.
+
+If the input `indices` is rank `N`, the output will have rank `N+1`.
+The new axis is created at dimension `axis` (default: the new axis is
+appended at the end).
+
+If `indices` is a scalar the output shape will be a vector of length `depth`.
+
+If `indices` is a vector of length `features`, the output shape will be:
+```
+ features x depth if axis == -1
+ depth x features if axis == 0
+```
+
+If `indices` is a matrix (batch) with shape `[batch, features]`,
+the output shape will be:
+```
+ batch x features x depth if axis == -1
+ batch x depth x features if axis == 1
+ depth x batch x features if axis == 0
+```
+
+
+Examples
+=========
+
+Suppose that
+
+```
+ indices = [0, 2, -1, 1]
+ depth = 3
+ on_value = 5.0
+ off_value = 0.0
+ axis = -1
+```
+
+Then output is `[4 x 3]`:
+
+ ```output =
+ [5.0 0.0 0.0] // one_hot(0)
+ [0.0 0.0 5.0] // one_hot(2)
+ [0.0 0.0 0.0] // one_hot(-1)
+ [0.0 5.0 0.0] // one_hot(1)
+ ```
+
+Suppose that
+
+```
+ indices = [0, 2, -1, 1]
+ depth = 3
+ on_value = 0.0
+ off_value = 3.0
+ axis = 0
+```
+
+Then output is `[3 x 4]`:
+
+ ```output =
+ [0.0 3.0 3.0 3.0]
+ [3.0 3.0 3.0 0.0]
+ [3.0 3.0 3.0 3.0]
+ [3.0 0.0 3.0 3.0]
+ // ^ one_hot(0)
+ // ^ one_hot(2)
+ // ^ one_hot(-1)
+ // ^ one_hot(1)
+ ```
+Suppose that
+
+```
+ indices = [[0, 2], [1, -1]]
+ depth = 3
+ on_value = 1.0
+ off_value = 0.0
+ axis = -1
+```
+
+Then output is `[2 x 2 x 3]`:
+
+ ```output =
+ [
+ [1.0, 0.0, 0.0] // one_hot(0)
+ [0.0, 0.0, 1.0] // one_hot(2)
+ ][
+ [0.0, 1.0, 0.0] // one_hot(1)
+ [0.0, 0.0, 0.0] // one_hot(-1)
+ ]```
+END
+}
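The first example above reduces to one broadcasted comparison in NumPy (a sketch of the `axis = -1` case only):

```python
import numpy as np

indices = np.array([0, 2, -1, 1])
depth, on_value, off_value = 3, 5.0, 0.0
# Compare each index against [0, depth); out-of-range indices (-1) match
# nothing and take off_value everywhere.
output = np.where(indices[..., None] == np.arange(depth), on_value, off_value)
print(output)
# [[5. 0. 0.]
#  [0. 0. 5.]
#  [0. 0. 0.]
#  [0. 5. 0.]]
```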
diff --git a/tensorflow/core/api_def/base_api/api_def_OneShotIterator.pbtxt b/tensorflow/core/api_def/base_api/api_def_OneShotIterator.pbtxt
new file mode 100644
index 0000000000..9040f2d982
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_OneShotIterator.pbtxt
@@ -0,0 +1,37 @@
+op {
+ graph_op_name: "OneShotIterator"
+ out_arg {
+ name: "handle"
+ description: <<END
+A handle to the iterator that can be passed to an "IteratorGetNext"
+op.
+END
+ }
+ attr {
+ name: "dataset_factory"
+ description: <<END
+A function of type `() -> DT_VARIANT`, where the returned
+DT_VARIANT is a dataset.
+END
+ }
+ summary: "Makes a \"one-shot\" iterator that can be iterated only once."
+ description: <<END
+A one-shot iterator bundles the logic for defining the dataset and
+the state of the iterator in a single op, which allows simple input
+pipelines to be defined without an additional initialization
+("MakeIterator") step.
+
+One-shot iterators have the following limitations:
+
+* They do not support parameterization: all logic for creating the underlying
+ dataset must be bundled in the `dataset_factory` function.
+* They are not resettable. Once a one-shot iterator reaches the end of its
+ underlying dataset, subsequent "IteratorGetNext" operations on that
+ iterator will always produce an `OutOfRange` error.
+
+For greater flexibility, use "Iterator" and "MakeIterator" to define
+an iterator using an arbitrary subgraph, which may capture tensors
+(including fed values) as parameters, and which may be reset multiple
+times by rerunning "MakeIterator".
+END
+}
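In the Python API of this era the op sits behind `Dataset.make_one_shot_iterator()`; a graph-mode sketch, assuming the TF 1.x `tf.data` API:

```python
import tensorflow as tf

dataset = tf.data.Dataset.range(3)
iterator = dataset.make_one_shot_iterator()  # no MakeIterator step needed
next_element = iterator.get_next()

with tf.Session() as sess:
    print(sess.run(next_element))  # 0
    print(sess.run(next_element))  # 1
    print(sess.run(next_element))  # 2
    # A fourth run raises tf.errors.OutOfRangeError, as described above.
```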
diff --git a/tensorflow/core/api_def/base_api/api_def_OnesLike.pbtxt b/tensorflow/core/api_def/base_api/api_def_OnesLike.pbtxt
new file mode 100644
index 0000000000..7c640ab84e
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_OnesLike.pbtxt
@@ -0,0 +1,16 @@
+op {
+ graph_op_name: "OnesLike"
+ in_arg {
+ name: "x"
+ description: <<END
+a tensor of type T.
+END
+ }
+ out_arg {
+ name: "y"
+ description: <<END
+a tensor of the same shape and type as x but filled with ones.
+END
+ }
+ summary: "Returns a tensor of ones with the same shape and type as x."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_OrderedMapClear.pbtxt b/tensorflow/core/api_def/base_api/api_def_OrderedMapClear.pbtxt
new file mode 100644
index 0000000000..8af5a82374
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_OrderedMapClear.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "OrderedMapClear"
+ summary: "Op removes all elements in the underlying container."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_OrderedMapIncompleteSize.pbtxt b/tensorflow/core/api_def/base_api/api_def_OrderedMapIncompleteSize.pbtxt
new file mode 100644
index 0000000000..1cb89477ab
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_OrderedMapIncompleteSize.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "OrderedMapIncompleteSize"
+ summary: "Op returns the number of incomplete elements in the underlying container."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_OrderedMapPeek.pbtxt b/tensorflow/core/api_def/base_api/api_def_OrderedMapPeek.pbtxt
new file mode 100644
index 0000000000..bafdd425e2
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_OrderedMapPeek.pbtxt
@@ -0,0 +1,9 @@
+op {
+ graph_op_name: "OrderedMapPeek"
+ summary: "Op peeks at the values at the specified key. If the"
+ description: <<END
+underlying container does not contain this key,
+this op will block until it does. This Op is optimized for
+performance.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_OrderedMapSize.pbtxt b/tensorflow/core/api_def/base_api/api_def_OrderedMapSize.pbtxt
new file mode 100644
index 0000000000..c5bad3012c
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_OrderedMapSize.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "OrderedMapSize"
+ summary: "Op returns the number of elements in the underlying container."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_OrderedMapStage.pbtxt b/tensorflow/core/api_def/base_api/api_def_OrderedMapStage.pbtxt
new file mode 100644
index 0000000000..dad0b27601
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_OrderedMapStage.pbtxt
@@ -0,0 +1,40 @@
+op {
+ graph_op_name: "OrderedMapStage"
+ in_arg {
+ name: "key"
+ description: <<END
+int64
+END
+ }
+ in_arg {
+ name: "values"
+ description: <<END
+A list of tensors whose data types must adhere to the `dtypes` attr.
+END
+ }
+ attr {
+ name: "capacity"
+ description: <<END
+Maximum number of elements in the Staging Area. If > 0, inserts
+on the container will block when the capacity is reached.
+END
+ }
+ attr {
+ name: "container"
+ description: <<END
+If non-empty, this queue is placed in the given container. Otherwise,
+a default container is used.
+END
+ }
+ attr {
+ name: "shared_name"
+ description: <<END
+This name must match the name used by the corresponding Unstage Op.
+END
+ }
+ summary: "Stage (key, values) in the underlying container which behaves like a ordered"
+ description: <<END
+associative container. Elements are ordered by key.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_OrderedMapUnstage.pbtxt b/tensorflow/core/api_def/base_api/api_def_OrderedMapUnstage.pbtxt
new file mode 100644
index 0000000000..731f1ac6cc
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_OrderedMapUnstage.pbtxt
@@ -0,0 +1,8 @@
+op {
+ graph_op_name: "OrderedMapUnstage"
+ summary: "Op removes and returns the values associated with the key"
+ description: <<END
+from the underlying container. If the underlying container
+does not contain this key, the op will block until it does.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_OrderedMapUnstageNoKey.pbtxt b/tensorflow/core/api_def/base_api/api_def_OrderedMapUnstageNoKey.pbtxt
new file mode 100644
index 0000000000..ca517a1331
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_OrderedMapUnstageNoKey.pbtxt
@@ -0,0 +1,8 @@
+op {
+ graph_op_name: "OrderedMapUnstageNoKey"
+ summary: "Op removes and returns the (key, value) element with the smallest"
+ description: <<END
+key from the underlying container. If the underlying container
+does not contain elements, the op will block until it does.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_P.pbtxt b/tensorflow/core/api_def/base_api/api_def_P.pbtxt
deleted file mode 100644
index a3abb079e9..0000000000
--- a/tensorflow/core/api_def/base_api/api_def_P.pbtxt
+++ /dev/null
@@ -1,431 +0,0 @@
-op {
- graph_op_name: "Pack"
- endpoint {
- name: "Pack"
- }
- summary: "Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor."
- description: <<END
-Packs the `N` tensors in `values` into a tensor with rank one higher than each
-tensor in `values`, by packing them along the `axis` dimension.
-Given a list of tensors of shape `(A, B, C)`;
-
-if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
-if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
-Etc.
-
-For example:
-
-```
-# 'x' is [1, 4]
-# 'y' is [2, 5]
-# 'z' is [3, 6]
-pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim.
-pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
-```
-
-This is the opposite of `unpack`.
-END
-}
-op {
- graph_op_name: "Pad"
- endpoint {
- name: "Pad"
- }
- summary: "Pads a tensor with zeros."
- description: <<END
-This operation pads a `input` with zeros according to the `paddings` you
-specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is the
-rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
-how many zeros to add before the contents of `input` in that dimension, and
-`paddings[D, 1]` indicates how many zeros to add after the contents of `input`
-in that dimension.
-
-The padded size of each dimension D of the output is:
-
-`paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
-
-For example:
-
-```
-# 't' is [[1, 1], [2, 2]]
-# 'paddings' is [[1, 1], [2, 2]]
-# rank of 't' is 2
-pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
- [0, 0, 1, 1, 0, 0]
- [0, 0, 2, 2, 0, 0]
- [0, 0, 0, 0, 0, 0]]
-```
-END
-}
-op {
- graph_op_name: "PadV2"
- endpoint {
- name: "PadV2"
- }
- summary: "Pads a tensor."
- description: <<END
-This operation pads `input` according to the `paddings` and `constant_values`
-you specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is
-the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
-how many padding values to add before the contents of `input` in that dimension,
-and `paddings[D, 1]` indicates how many padding values to add after the contents
-of `input` in that dimension. `constant_values` is a scalar tensor of the same
-type as `input` that indicates the value to use for padding `input`.
-
-The padded size of each dimension D of the output is:
-
-`paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
-
-For example:
-
-```
-# 't' is [[1, 1], [2, 2]]
-# 'paddings' is [[1, 1], [2, 2]]
-# 'constant_values' is 0
-# rank of 't' is 2
-pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
- [0, 0, 1, 1, 0, 0]
- [0, 0, 2, 2, 0, 0]
- [0, 0, 0, 0, 0, 0]]
-```
-END
-}
-op {
- graph_op_name: "PaddedBatchDataset"
- endpoint {
- name: "PaddedBatchDataset"
- }
- summary: "Creates a dataset that batches and pads `batch_size` elements from the input."
-}
-op {
- graph_op_name: "PaddingFIFOQueue"
- endpoint {
- name: "PaddingFIFOQueue"
- }
- summary: "A queue that produces elements in first-in first-out order."
- description: <<END
-Variable-size shapes are allowed by setting the corresponding shape dimensions
-to 0 in the shape attr. In this case DequeueMany will pad up to the maximum
-size of any given element in the minibatch. See below for details.
-END
-}
-op {
- graph_op_name: "PaddingFIFOQueueV2"
- endpoint {
- name: "PaddingFIFOQueueV2"
- }
- summary: "A queue that produces elements in first-in first-out order."
- description: <<END
-Variable-size shapes are allowed by setting the corresponding shape dimensions
-to 0 in the shape attr. In this case DequeueMany will pad up to the maximum
-size of any given element in the minibatch. See below for details.
-END
-}
-op {
- graph_op_name: "ParallelConcat"
- endpoint {
- name: "ParallelConcat"
- }
- summary: "Concatenates a list of `N` tensors along the first dimension."
- description: <<END
-The input tensors are all required to have size 1 in the first dimension.
-
-For example:
-
-```
-# 'x' is [[1, 4]]
-# 'y' is [[2, 5]]
-# 'z' is [[3, 6]]
-parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim.
-```
-
-The difference between concat and parallel_concat is that concat requires all
-of the inputs be computed before the operation will begin but doesn't require
-that the input shapes be known during graph construction. Parallel concat
-will copy pieces of the input into the output as they become available, in
-some situations this can provide a performance benefit.
-END
-}
-op {
- graph_op_name: "ParallelDynamicStitch"
- endpoint {
- name: "ParallelDynamicStitch"
- }
- summary: "Interleave the values from the `data` tensors into a single tensor."
- description: <<END
-Builds a merged tensor such that
-
-```python
- merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
-```
-
-For example, if each `indices[m]` is scalar or vector, we have
-
-```python
- # Scalar indices:
- merged[indices[m], ...] = data[m][...]
-
- # Vector indices:
- merged[indices[m][i], ...] = data[m][i, ...]
-```
-
-Each `data[i].shape` must start with the corresponding `indices[i].shape`,
-and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we
-must have `data[i].shape = indices[i].shape + constant`. In terms of this
-`constant`, the output shape is
-
- merged.shape = [max(indices)] + constant
-
-Values may be merged in parallel, so if an index appears in both `indices[m][i]`
-and `indices[n][j]`, the result may be invalid. This differs from the normal
-DynamicStitch operator that defines the behavior in that case.
-
-For example:
-
-```python
- indices[0] = 6
- indices[1] = [4, 1]
- indices[2] = [[5, 2], [0, 3]]
- data[0] = [61, 62]
- data[1] = [[41, 42], [11, 12]]
- data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
- merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
- [51, 52], [61, 62]]
-```
-
-This method can be used to merge partitions created by `dynamic_partition`
-as illustrated on the following example:
-
-```python
-  # Apply a function (increment x_i) to elements for which a certain
-  # condition applies (x_i != -1 in this example).
-  x = tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
-  condition_mask = tf.not_equal(x, tf.constant(-1.))
-  partitioned_data = tf.dynamic_partition(
-      x, tf.cast(condition_mask, tf.int32), 2)
-  partitioned_data[1] = partitioned_data[1] + 1.0
-  condition_indices = tf.dynamic_partition(
-      tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32), 2)
-  x = tf.dynamic_stitch(condition_indices, partitioned_data)
-  # Here x = [1.1, -1., 6.2, 5.3, -1., 8.4]; the -1. values remain
-  # unchanged.
-```
-
-<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
-<img style="width:100%" src="https://www.tensorflow.org/images/DynamicStitch.png" alt>
-</div>
-END
-}
-op {
- graph_op_name: "ParallelMapDataset"
- endpoint {
- name: "ParallelMapDataset"
- }
- summary: "Creates a dataset that applies `f` to the outputs of `input_dataset`."
- description: <<END
-Unlike a "MapDataset", which applies `f` sequentially, this dataset invokes up
-to `num_parallel_calls` copies of `f` in parallel.
-END
-}
-op {
- graph_op_name: "ParameterizedTruncatedNormal"
- endpoint {
- name: "ParameterizedTruncatedNormal"
- }
- summary: "Outputs random values from a normal distribution. The parameters may each be a"
- description: <<END
-scalar which applies to the entire output, or a vector of length shape[0] which
-stores the parameters for each batch.
-END
-}
-op {
- graph_op_name: "ParseExample"
- endpoint {
- name: "ParseExample"
- }
- summary: "Transforms a vector of brain.Example protos (as strings) into typed tensors."
-}
-op {
- graph_op_name: "ParseSingleSequenceExample"
- endpoint {
- name: "ParseSingleSequenceExample"
- }
- summary: "Transforms a scalar brain.SequenceExample proto (as strings) into typed tensors."
-}
-op {
- graph_op_name: "ParseTensor"
- endpoint {
- name: "ParseTensor"
- }
- summary: "Transforms a serialized tensorflow.TensorProto proto into a Tensor."
-}
-op {
- graph_op_name: "Placeholder"
- endpoint {
- name: "Placeholder"
- }
- summary: "A placeholder op for a value that will be fed into the computation."
- description: <<END
-N.B. This operation will fail with an error if it is executed. It is
-intended as a way to represent a value that will always be fed, and to
-provide attrs that enable the fed value to be checked at runtime.
-END
-}
-op {
- graph_op_name: "PlaceholderV2"
- endpoint {
- name: "PlaceholderV2"
- }
- summary: "A placeholder op for a value that will be fed into the computation."
- description: <<END
-N.B. This operation will fail with an error if it is executed. It is
-intended as a way to represent a value that will always be fed, and to
-provide attrs that enable the fed value to be checked at runtime.
-END
-}
-op {
- graph_op_name: "PlaceholderWithDefault"
- endpoint {
- name: "PlaceholderWithDefault"
- }
- summary: "A placeholder op that passes through `input` when its output is not fed."
-}
-op {
- graph_op_name: "Polygamma"
- endpoint {
- name: "Polygamma"
- }
- summary: "Compute the polygamma function \\\\(\\psi^{(n)}(x)\\\\)."
- description: <<END
-The polygamma function is defined as:
-
-\\(\psi^{(n)}(x) = \frac{d^n}{dx^n} \psi(x)\\)
-
-where \\(\psi(x)\\) is the digamma function.
-END
-}
-op {
- graph_op_name: "PopulationCount"
- endpoint {
- name: "PopulationCount"
- }
- summary: "Computes element-wise population count (a.k.a. popcount, bitsum, bitcount)."
- description: <<END
-For each entry in `x`, calculates the number of `1` (on) bits in the binary
-representation of that entry.
-
-**NOTE**: It is more efficient to first `tf.bitcast` your tensors into
-`int32` or `int64` and perform the bitcount on the result, than to feed in
-8- or 16-bit inputs and then aggregate the resulting counts.
-END
-}
-op {
- graph_op_name: "Pow"
- endpoint {
- name: "Pow"
- }
- summary: "Computes the power of one value to another."
- description: <<END
-Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
-corresponding elements in `x` and `y`. For example:
-
-```
-# tensor 'x' is [[2, 2], [3, 3]]
-# tensor 'y' is [[8, 16], [2, 3]]
-tf.pow(x, y) ==> [[256, 65536], [9, 27]]
-```
-END
-}
-op {
- graph_op_name: "PrefetchDataset"
- endpoint {
- name: "PrefetchDataset"
- }
- summary: "Creates a dataset that asynchronously prefetches elements from `input_dataset`."
-}
-op {
- graph_op_name: "PreventGradient"
- endpoint {
- name: "PreventGradient"
- }
- summary: "An identity op that triggers an error if a gradient is requested."
- description: <<END
-When executed in a graph, this op outputs its input tensor as-is.
-
-When building ops to compute gradients, the TensorFlow gradient system
-will return an error when trying to look up the gradient of this op,
-because no gradient must ever be registered for this function. This
-op exists to prevent subtle bugs from silently returning unimplemented
-gradients in some corner cases.
-END
-}
-op {
- graph_op_name: "Print"
- endpoint {
- name: "Print"
- }
- summary: "Prints a list of tensors."
- description: <<END
-Passes `input` through to `output` and prints `data` when evaluating.
-END
-}
-op {
- graph_op_name: "PriorityQueue"
- endpoint {
- name: "PriorityQueue"
- }
- summary: "A queue that produces elements sorted by the first component value."
- description: <<END
-Note that the PriorityQueue requires the first component of any element
-to be a scalar int64, in addition to the other elements declared by
-component_types. Therefore calls to Enqueue and EnqueueMany (resp. Dequeue
-and DequeueMany) on a PriorityQueue will all require (resp. output) one extra
-entry in their input (resp. output) lists.
-END
-}
-op {
- graph_op_name: "PriorityQueueV2"
- endpoint {
- name: "PriorityQueueV2"
- }
- summary: "A queue that produces elements sorted by the first component value."
- description: <<END
-Note that the PriorityQueue requires the first component of any element
-to be a scalar int64, in addition to the other elements declared by
-component_types. Therefore calls to Enqueue and EnqueueMany (resp. Dequeue
-and DequeueMany) on a PriorityQueue will all require (resp. output) one extra
-entry in their input (resp. output) lists.
-END
-}
-op {
- graph_op_name: "Prod"
- endpoint {
- name: "Prod"
- }
- summary: "Computes the product of elements across dimensions of a tensor."
- description: <<END
-Reduces `input` along the dimensions given in `reduction_indices`. Unless
-`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
-`reduction_indices`. If `keep_dims` is true, the reduced dimensions are
-retained with length 1.
-END
-}
-op {
- graph_op_name: "PyFunc"
- endpoint {
- name: "PyFunc"
- }
- summary: "Invokes a python function to compute func(input)->output."
- description: <<END
-This operation is considered stateful. For a stateless version, see
-PyFuncStateless.
-END
-}
-op {
- graph_op_name: "PyFuncStateless"
- endpoint {
- name: "PyFuncStateless"
- }
- summary: "A stateless version of PyFunc."
-}
diff --git a/tensorflow/core/api_def/base_api/api_def_Pack.pbtxt b/tensorflow/core/api_def/base_api/api_def_Pack.pbtxt
new file mode 100644
index 0000000000..106ca3cd86
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Pack.pbtxt
@@ -0,0 +1,47 @@
+op {
+ graph_op_name: "Pack"
+ endpoint {
+ name: "Stack"
+ }
+ in_arg {
+ name: "values"
+ description: <<END
+Must be of same shape and type.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+The packed tensor.
+END
+ }
+ attr {
+ name: "axis"
+ description: <<END
+Dimension along which to pack. Negative values wrap around, so the
+valid range is `[-(R+1), R+1)`.
+END
+ }
+ summary: "Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor."
+ description: <<END
+Packs the `N` tensors in `values` into a tensor with rank one higher than each
+tensor in `values`, by packing them along the `axis` dimension.
+Given a list of tensors of shape `(A, B, C)`;
+
+if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
+if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
+Etc.
+
+For example:
+
+```
+# 'x' is [1, 4]
+# 'y' is [2, 5]
+# 'z' is [3, 6]
+pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim.
+pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
+```
+
+This is the opposite of `unpack`.
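+
+A minimal Python sketch, assuming the `tf.stack` wrapper, which maps to this
+op:
+
+```python
+import tensorflow as tf
+
+x = tf.constant([1, 4])
+y = tf.constant([2, 5])
+z = tf.constant([3, 6])
+tf.stack([x, y, z])          # shape (3, 2): [[1, 4], [2, 5], [3, 6]]
+tf.stack([x, y, z], axis=1)  # shape (2, 3): [[1, 2, 3], [4, 5, 6]]
+```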
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Pad.pbtxt b/tensorflow/core/api_def/base_api/api_def_Pad.pbtxt
new file mode 100644
index 0000000000..e45e2375eb
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Pad.pbtxt
@@ -0,0 +1,28 @@
+op {
+ graph_op_name: "Pad"
+ summary: "Pads a tensor with zeros."
+ description: <<END
+This operation pads `input` with zeros according to the `paddings` you
+specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is the
+rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
+how many zeros to add before the contents of `input` in that dimension, and
+`paddings[D, 1]` indicates how many zeros to add after the contents of `input`
+in that dimension.
+
+The padded size of each dimension D of the output is:
+
+`paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
+
+For example:
+
+```
+# 't' is [[1, 1], [2, 2]]
+# 'paddings' is [[1, 1], [2, 2]]
+# rank of 't' is 2
+pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
+ [0, 0, 1, 1, 0, 0]
+ [0, 0, 2, 2, 0, 0]
+ [0, 0, 0, 0, 0, 0]]
+```
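+
+A minimal runnable sketch of the same example, assuming the Python `tf.pad`
+wrapper, which maps to this op:
+
+```python
+import tensorflow as tf
+
+t = tf.constant([[1, 1], [2, 2]])
+paddings = tf.constant([[1, 1], [2, 2]])
+padded = tf.pad(t, paddings)  # "CONSTANT" mode pads with zeros; shape (4, 6)
+```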
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_PadV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_PadV2.pbtxt
new file mode 100644
index 0000000000..7e2765764e
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_PadV2.pbtxt
@@ -0,0 +1,30 @@
+op {
+ graph_op_name: "PadV2"
+ summary: "Pads a tensor."
+ description: <<END
+This operation pads `input` according to the `paddings` and `constant_values`
+you specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is
+the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
+how many padding values to add before the contents of `input` in that dimension,
+and `paddings[D, 1]` indicates how many padding values to add after the contents
+of `input` in that dimension. `constant_values` is a scalar tensor of the same
+type as `input` that indicates the value to use for padding `input`.
+
+The padded size of each dimension D of the output is:
+
+`paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
+
+For example:
+
+```
+# 't' is [[1, 1], [2, 2]]
+# 'paddings' is [[1, 1], [2, 2]]
+# 'constant_values' is 0
+# rank of 't' is 2
+pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
+ [0, 0, 1, 1, 0, 0]
+ [0, 0, 2, 2, 0, 0]
+ [0, 0, 0, 0, 0, 0]]
+```
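+
+A minimal sketch, assuming the Python `tf.pad` wrapper exposes a
+`constant_values` argument backed by this op:
+
+```python
+import tensorflow as tf
+
+t = tf.constant([[1, 1], [2, 2]])
+paddings = tf.constant([[1, 1], [2, 2]])
+padded = tf.pad(t, paddings, constant_values=7)  # pads with 7s; shape (4, 6)
+```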
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_PaddedBatchDataset.pbtxt b/tensorflow/core/api_def/base_api/api_def_PaddedBatchDataset.pbtxt
new file mode 100644
index 0000000000..d243dfe8b6
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_PaddedBatchDataset.pbtxt
@@ -0,0 +1,27 @@
+op {
+ graph_op_name: "PaddedBatchDataset"
+ in_arg {
+ name: "batch_size"
+ description: <<END
+A scalar representing the number of elements to accumulate in a
+batch.
+END
+ }
+ in_arg {
+ name: "padded_shapes"
+ description: <<END
+A list of int64 tensors representing the desired padded shapes
+of the corresponding output components. These shapes may be partially
+specified, using `-1` to indicate that a particular dimension should be
+padded to the maximum size of all batch elements.
+END
+ }
+ in_arg {
+ name: "padding_values"
+ description: <<END
+A list of scalars containing the padding value to use for
+each of the outputs.
+END
+ }
+ summary: "Creates a dataset that batches and pads `batch_size` elements from the input."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_PaddingFIFOQueue.pbtxt b/tensorflow/core/api_def/base_api/api_def_PaddingFIFOQueue.pbtxt
new file mode 100644
index 0000000000..3b6671a2f1
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_PaddingFIFOQueue.pbtxt
@@ -0,0 +1,56 @@
+op {
+ graph_op_name: "PaddingFIFOQueue"
+ visibility: SKIP
+ out_arg {
+ name: "handle"
+ description: <<END
+The handle to the queue.
+END
+ }
+ attr {
+ name: "component_types"
+ description: <<END
+The type of each component in a value.
+END
+ }
+ attr {
+ name: "shapes"
+ description: <<END
+The shape of each component in a value. The length of this attr must
+be either 0 or the same as the length of component_types.
+Shapes of fixed rank but variable size are allowed by setting
+any shape dimension to -1. In this case, the inputs' shape may vary along
+the given dimension, and DequeueMany will pad the given dimension with
+zeros up to the maximum shape of all elements in the given batch.
+If the length of this attr is 0, different queue elements may have
+different ranks and shapes, but only one element may be dequeued at a time.
+END
+ }
+ attr {
+ name: "capacity"
+ description: <<END
+The upper bound on the number of elements in this queue.
+Negative numbers mean no limit.
+END
+ }
+ attr {
+ name: "container"
+ description: <<END
+If non-empty, this queue is placed in the given container.
+Otherwise, a default container is used.
+END
+ }
+ attr {
+ name: "shared_name"
+ description: <<END
+If non-empty, this queue will be shared under the given name
+across multiple sessions.
+END
+ }
+ summary: "A queue that produces elements in first-in first-out order."
+ description: <<END
+Variable-size shapes are allowed by setting the corresponding shape dimensions
+to -1 in the shape attr. In this case DequeueMany will pad up to the maximum
+size of any given element in the minibatch. See the `shapes` attr for details.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_PaddingFIFOQueueV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_PaddingFIFOQueueV2.pbtxt
new file mode 100644
index 0000000000..b65be6f4f5
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_PaddingFIFOQueueV2.pbtxt
@@ -0,0 +1,58 @@
+op {
+ graph_op_name: "PaddingFIFOQueueV2"
+ endpoint {
+ name: "PaddingFIFOQueue"
+ }
+ out_arg {
+ name: "handle"
+ description: <<END
+The handle to the queue.
+END
+ }
+ attr {
+ name: "component_types"
+ description: <<END
+The type of each component in a value.
+END
+ }
+ attr {
+ name: "shapes"
+ description: <<END
+The shape of each component in a value. The length of this attr must
+be either 0 or the same as the length of component_types.
+Shapes of fixed rank but variable size are allowed by setting
+any shape dimension to -1. In this case, the inputs' shape may vary along
+the given dimension, and DequeueMany will pad the given dimension with
+zeros up to the maximum shape of all elements in the given batch.
+If the length of this attr is 0, different queue elements may have
+different ranks and shapes, but only one element may be dequeued at a time.
+END
+ }
+ attr {
+ name: "capacity"
+ description: <<END
+The upper bound on the number of elements in this queue.
+Negative numbers mean no limit.
+END
+ }
+ attr {
+ name: "container"
+ description: <<END
+If non-empty, this queue is placed in the given container.
+Otherwise, a default container is used.
+END
+ }
+ attr {
+ name: "shared_name"
+ description: <<END
+If non-empty, this queue will be shared under the given name
+across multiple sessions.
+END
+ }
+ summary: "A queue that produces elements in first-in first-out order."
+ description: <<END
+Variable-size shapes are allowed by setting the corresponding shape dimensions
+to -1 in the shape attr. In this case DequeueMany will pad up to the maximum
+size of any given element in the minibatch. See the `shapes` attr for details.
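+
+A rough TF 1.x-style sketch, assuming the `tf.PaddingFIFOQueue` wrapper; a
+`None` dimension in `shapes` marks the dimension that is padded on dequeue:
+
+```python
+import tensorflow as tf
+
+# Variable-length int32 vectors; dequeue_many pads them to a common length.
+q = tf.PaddingFIFOQueue(capacity=10, dtypes=[tf.int32], shapes=[(None,)])
+enq = [q.enqueue(([1, 2, 3],)), q.enqueue(([4],))]
+batch = q.dequeue_many(2)
+
+with tf.Session() as sess:
+  sess.run(enq)
+  print(sess.run(batch))  # [[1 2 3] [4 0 0]]; the short row is zero-padded
+```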
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ParallelConcat.pbtxt b/tensorflow/core/api_def/base_api/api_def_ParallelConcat.pbtxt
new file mode 100644
index 0000000000..9cf2449c9f
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ParallelConcat.pbtxt
@@ -0,0 +1,42 @@
+op {
+ graph_op_name: "ParallelConcat"
+ in_arg {
+ name: "values"
+ description: <<END
+Tensors to be concatenated. All must have size 1 in the first dimension
+and same shape.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+The concatenated tensor.
+END
+ }
+ attr {
+ name: "shape"
+ description: <<END
+The final shape of the result; should be equal to the shape of any input
+but with the number of input values in the first dimension.
+END
+ }
+ summary: "Concatenates a list of `N` tensors along the first dimension."
+ description: <<END
+The input tensors are all required to have size 1 in the first dimension.
+
+For example:
+
+```
+# 'x' is [[1, 4]]
+# 'y' is [[2, 5]]
+# 'z' is [[3, 6]]
+parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim.
+```
+
+The difference between concat and parallel_concat is that concat requires all
+of the inputs to be computed before the operation begins, but doesn't require
+that the input shapes be known during graph construction. Parallel concat
+copies pieces of the input into the output as they become available; in some
+situations this can provide a performance benefit.
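+
+A minimal sketch, assuming the `tf.parallel_stack` wrapper, which lowers to
+this op:
+
+```python
+import tensorflow as tf
+
+x = tf.constant([1, 4])
+y = tf.constant([2, 5])
+z = tf.constant([3, 6])
+s = tf.parallel_stack([x, y, z])  # [[1, 4], [2, 5], [3, 6]]
+```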
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ParallelDynamicStitch.pbtxt b/tensorflow/core/api_def/base_api/api_def_ParallelDynamicStitch.pbtxt
new file mode 100644
index 0000000000..9404a4dee0
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ParallelDynamicStitch.pbtxt
@@ -0,0 +1,67 @@
+op {
+ graph_op_name: "ParallelDynamicStitch"
+ summary: "Interleave the values from the `data` tensors into a single tensor."
+ description: <<END
+Builds a merged tensor such that
+
+```python
+ merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
+```
+
+For example, if each `indices[m]` is scalar or vector, we have
+
+```python
+ # Scalar indices:
+ merged[indices[m], ...] = data[m][...]
+
+ # Vector indices:
+ merged[indices[m][i], ...] = data[m][i, ...]
+```
+
+Each `data[i].shape` must start with the corresponding `indices[i].shape`,
+and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we
+must have `data[i].shape = indices[i].shape + constant`. In terms of this
+`constant`, the output shape is
+
+    merged.shape = [max(indices) + 1] + constant
+
+Values may be merged in parallel, so if an index appears in both `indices[m][i]`
+and `indices[n][j]`, the result may be invalid. This differs from the normal
+DynamicStitch operator that defines the behavior in that case.
+
+For example:
+
+```python
+ indices[0] = 6
+ indices[1] = [4, 1]
+ indices[2] = [[5, 2], [0, 3]]
+ data[0] = [61, 62]
+ data[1] = [[41, 42], [11, 12]]
+ data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
+ merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
+ [51, 52], [61, 62]]
+```
+
+This method can be used to merge partitions created by `dynamic_partition`
+as illustrated on the following example:
+
+```python
+  # Apply a function (increment x_i) to elements for which a certain
+  # condition applies (x_i != -1 in this example).
+  x = tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
+  condition_mask = tf.not_equal(x, tf.constant(-1.))
+  partitioned_data = tf.dynamic_partition(
+      x, tf.cast(condition_mask, tf.int32), 2)
+  partitioned_data[1] = partitioned_data[1] + 1.0
+  condition_indices = tf.dynamic_partition(
+      tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32), 2)
+  x = tf.dynamic_stitch(condition_indices, partitioned_data)
+  # Here x = [1.1, -1., 6.2, 5.3, -1., 8.4]; the -1. values remain
+  # unchanged.
+```
+
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="https://www.tensorflow.org/images/DynamicStitch.png" alt>
+</div>
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ParallelInterleaveDataset.pbtxt b/tensorflow/core/api_def/base_api/api_def_ParallelInterleaveDataset.pbtxt
new file mode 100644
index 0000000000..d6889b54a0
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ParallelInterleaveDataset.pbtxt
@@ -0,0 +1,21 @@
+op {
+ graph_op_name: "ParallelInterleaveDataset"
+ attr {
+ name: "f"
+ description: <<END
+A function mapping elements of `input_dataset`, concatenated with
+`other_arguments`, to a Dataset variant that contains elements matching
+`output_types` and `output_shapes`.
+END
+ }
+ summary: "Creates a dataset that applies `f` to the outputs of `input_dataset`."
+ description: <<END
+The resulting dataset is similar to the `InterleaveDataset`, with the exception
+that if retrieving the next value from a dataset would cause the requester to
+block, it will skip that input dataset. This dataset is especially useful
+when loading data from variable-latency datastores (e.g. HDFS, GCS), as it
+allows the training step to proceed so long as some data is available.
+
+!! WARNING !! This dataset is not deterministic!
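+
+A hedged, contrib-era sketch, assuming `tf.contrib.data.parallel_interleave`
+is available in your build; the file names are hypothetical:
+
+```python
+import tensorflow as tf
+
+# Hypothetical input files.
+files = tf.data.Dataset.from_tensor_slices(["a.tfrecord", "b.tfrecord"])
+dataset = files.apply(tf.contrib.data.parallel_interleave(
+    lambda f: tf.data.TFRecordDataset(f), cycle_length=2))
+```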
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ParallelMapDataset.pbtxt b/tensorflow/core/api_def/base_api/api_def_ParallelMapDataset.pbtxt
new file mode 100644
index 0000000000..313494dd73
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ParallelMapDataset.pbtxt
@@ -0,0 +1,15 @@
+op {
+ graph_op_name: "ParallelMapDataset"
+ in_arg {
+ name: "num_parallel_calls"
+ description: <<END
+The number of concurrent invocations of `f` that process
+elements from `input_dataset` in parallel.
+END
+ }
+ summary: "Creates a dataset that applies `f` to the outputs of `input_dataset`."
+ description: <<END
+Unlike a "MapDataset", which applies `f` sequentially, this dataset invokes up
+to `num_parallel_calls` copies of `f` in parallel.
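+
+A minimal sketch using the `Dataset.map` wrapper's `num_parallel_calls`
+argument, which is backed by this op:
+
+```python
+import tensorflow as tf
+
+dataset = tf.data.Dataset.range(100)
+# Up to four invocations of the map function may run concurrently.
+dataset = dataset.map(lambda x: x * 2, num_parallel_calls=4)
+```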
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ParameterizedTruncatedNormal.pbtxt b/tensorflow/core/api_def/base_api/api_def_ParameterizedTruncatedNormal.pbtxt
new file mode 100644
index 0000000000..a01c39a96a
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ParameterizedTruncatedNormal.pbtxt
@@ -0,0 +1,66 @@
+op {
+ graph_op_name: "ParameterizedTruncatedNormal"
+ in_arg {
+ name: "shape"
+ description: <<END
+The shape of the output tensor. Batches are indexed by the 0th dimension.
+END
+ }
+ in_arg {
+ name: "means"
+ description: <<END
+The mean parameter of each batch.
+END
+ }
+ in_arg {
+ name: "stdevs"
+ description: <<END
+The standard deviation parameter of each batch. Must be greater than 0.
+END
+ }
+ in_arg {
+ name: "minvals"
+ description: <<END
+The minimum cutoff. May be -infinity.
+END
+ }
+ in_arg {
+ name: "maxvals"
+ description: <<END
+The maximum cutoff. May be +infinity, and must be more than the minval
+for each batch.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+A matrix of shape num_batches x samples_per_batch, filled with random
+truncated normal values using the parameters for each row.
+END
+ }
+ attr {
+ name: "seed"
+ description: <<END
+If either `seed` or `seed2` are set to be non-zero, the random number
+generator is seeded by the given seed. Otherwise, it is seeded by a
+random seed.
+END
+ }
+ attr {
+ name: "seed2"
+ description: <<END
+A second seed to avoid seed collision.
+END
+ }
+ attr {
+ name: "dtype"
+ description: <<END
+The type of the output.
+END
+ }
+ summary: "Outputs random values from a normal distribution. The parameters may each be a"
+ description: <<END
+scalar which applies to the entire output, or a vector of length shape[0] which
+stores the parameters for each batch.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ParseExample.pbtxt b/tensorflow/core/api_def/base_api/api_def_ParseExample.pbtxt
new file mode 100644
index 0000000000..4f404206ec
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ParseExample.pbtxt
@@ -0,0 +1,78 @@
+op {
+ graph_op_name: "ParseExample"
+ in_arg {
+ name: "serialized"
+ description: <<END
+A vector containing a batch of binary serialized Example protos.
+END
+ }
+ in_arg {
+ name: "names"
+ description: <<END
+A vector containing the names of the serialized protos.
+May contain, for example, table key (descriptive) names for the
+corresponding serialized protos. These are purely for debugging purposes, and
+the presence of values here has no effect on the output.
+May also be an empty vector if no names are available.
+If non-empty, this vector must be the same length as "serialized".
+END
+ }
+ in_arg {
+ name: "sparse_keys"
+ description: <<END
+A list of Nsparse string Tensors (scalars).
+The keys expected in the Examples' features associated with sparse values.
+END
+ }
+ in_arg {
+ name: "dense_keys"
+ description: <<END
+A list of Ndense string Tensors (scalars).
+The keys expected in the Examples' features associated with dense values.
+END
+ }
+ in_arg {
+ name: "dense_defaults"
+ description: <<END
+A list of Ndense Tensors (some may be empty).
+dense_defaults[j] provides default values
+when the example's feature_map lacks dense_key[j]. If an empty Tensor is
+provided for dense_defaults[j], then the Feature dense_keys[j] is required.
+The input type is inferred from dense_defaults[j], even when it's empty.
+If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined,
+then the shape of dense_defaults[j] must match that of dense_shapes[j].
+If dense_shapes[j] has an undefined major dimension (variable strides dense
+feature), dense_defaults[j] must contain a single element:
+the padding element.
+END
+ }
+ attr {
+ name: "sparse_types"
+ description: <<END
+A list of Nsparse types; the data types of data in each Feature
+given in sparse_keys.
+Currently the ParseExample supports DT_FLOAT (FloatList),
+DT_INT64 (Int64List), and DT_STRING (BytesList).
+END
+ }
+ attr {
+ name: "dense_shapes"
+ description: <<END
+A list of Ndense shapes; the shapes of data in each Feature
+given in dense_keys.
+The number of elements in the Feature corresponding to dense_key[j]
+must always equal dense_shapes[j].NumEntries().
+If dense_shapes[j] == (D0, D1, ..., DN) then the shape of output
+Tensor dense_values[j] will be (|serialized|, D0, D1, ..., DN):
+The dense outputs are just the inputs row-stacked by batch.
+This works for dense_shapes[j] = (-1, D1, ..., DN). In this case
+the shape of the output Tensor dense_values[j] will be
+(|serialized|, M, D1, .., DN), where M is the maximum number of blocks
+of elements of length D1 * .... * DN, across all minibatch entries
+in the input. Any minibatch entry with less than M blocks of elements of
+length D1 * ... * DN will be padded with the corresponding default_value
+scalar element along the second dimension.
+END
+ }
+ summary: "Transforms a vector of brain.Example protos (as strings) into typed tensors."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ParseSingleSequenceExample.pbtxt b/tensorflow/core/api_def/base_api/api_def_ParseSingleSequenceExample.pbtxt
new file mode 100644
index 0000000000..a087c11d46
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ParseSingleSequenceExample.pbtxt
@@ -0,0 +1,112 @@
+op {
+ graph_op_name: "ParseSingleSequenceExample"
+ in_arg {
+ name: "serialized"
+ description: <<END
+A scalar containing a binary serialized SequenceExample proto.
+END
+ }
+ in_arg {
+ name: "feature_list_dense_missing_assumed_empty"
+ description: <<END
+A vector listing the
+FeatureList keys which may be missing from the SequenceExample. If the
+associated FeatureList is missing, it is treated as empty. By default,
+any FeatureList not listed in this vector must exist in the SequenceExample.
+END
+ }
+ in_arg {
+ name: "context_sparse_keys"
+ description: <<END
+A list of Ncontext_sparse string Tensors (scalars).
+The keys expected in the Examples' features associated with context_sparse
+values.
+END
+ }
+ in_arg {
+ name: "context_dense_keys"
+ description: <<END
+A list of Ncontext_dense string Tensors (scalars).
+The keys expected in the SequenceExamples' context features associated with
+dense values.
+END
+ }
+ in_arg {
+ name: "feature_list_sparse_keys"
+ description: <<END
+A list of Nfeature_list_sparse string Tensors
+(scalars). The keys expected in the FeatureLists associated with sparse
+values.
+END
+ }
+ in_arg {
+ name: "feature_list_dense_keys"
+ description: <<END
+A list of Nfeature_list_dense string Tensors (scalars).
+The keys expected in the SequenceExamples' feature_lists associated
+with lists of dense values.
+END
+ }
+ in_arg {
+ name: "context_dense_defaults"
+ description: <<END
+A list of Ncontext_dense Tensors (some may be empty).
+context_dense_defaults[j] provides default values
+when the SequenceExample's context map lacks context_dense_key[j].
+If an empty Tensor is provided for context_dense_defaults[j],
+then the Feature context_dense_keys[j] is required.
+The input type is inferred from context_dense_defaults[j], even when it's
+empty. If context_dense_defaults[j] is not empty, its shape must match
+context_dense_shapes[j].
+END
+ }
+ in_arg {
+ name: "debug_name"
+ description: <<END
+A scalar containing the name of the serialized proto.
+May contain, for example, table key (descriptive) name for the
+corresponding serialized proto. This is purely for debugging purposes, and
+the presence of values here has no effect on the output.
+May also be an empty scalar if no name is available.
+END
+ }
+ attr {
+ name: "context_sparse_types"
+ description: <<END
+A list of Ncontext_sparse types; the data types of data in
+each context Feature given in context_sparse_keys.
+Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),
+DT_INT64 (Int64List), and DT_STRING (BytesList).
+END
+ }
+ attr {
+ name: "context_dense_shapes"
+ description: <<END
+A list of Ncontext_dense shapes; the shapes of data in
+each context Feature given in context_dense_keys.
+The number of elements in the Feature corresponding to context_dense_key[j]
+must always equal context_dense_shapes[j].NumEntries().
+The shape of context_dense_values[j] will match context_dense_shapes[j].
+END
+ }
+ attr {
+ name: "feature_list_sparse_types"
+ description: <<END
+A list of Nfeature_list_sparse types; the data types
+of data in each FeatureList given in feature_list_sparse_keys.
+Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),
+DT_INT64 (Int64List), and DT_STRING (BytesList).
+END
+ }
+ attr {
+ name: "feature_list_dense_shapes"
+ description: <<END
+A list of Nfeature_list_dense shapes; the shapes of
+data in each FeatureList given in feature_list_dense_keys.
+The shape of each Feature in the FeatureList corresponding to
+feature_list_dense_key[j] must always equal
+feature_list_dense_shapes[j].NumEntries().
+END
+ }
+ summary: "Transforms a scalar brain.SequenceExample proto (as strings) into typed tensors."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ParseTensor.pbtxt b/tensorflow/core/api_def/base_api/api_def_ParseTensor.pbtxt
new file mode 100644
index 0000000000..d05efdf095
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ParseTensor.pbtxt
@@ -0,0 +1,23 @@
+op {
+ graph_op_name: "ParseTensor"
+ in_arg {
+ name: "serialized"
+ description: <<END
+A scalar string containing a serialized TensorProto proto.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+A Tensor of type `out_type`.
+END
+ }
+ attr {
+ name: "out_type"
+ description: <<END
+The type of the serialized tensor. The provided type must match the
+type of the serialized tensor and no implicit conversion will take place.
+END
+ }
+ summary: "Transforms a serialized tensorflow.TensorProto proto into a Tensor."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Placeholder.pbtxt b/tensorflow/core/api_def/base_api/api_def_Placeholder.pbtxt
new file mode 100644
index 0000000000..eb27bc6142
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Placeholder.pbtxt
@@ -0,0 +1,28 @@
+op {
+ graph_op_name: "Placeholder"
+ out_arg {
+ name: "output"
+ description: <<END
+A placeholder tensor that must be replaced using the feed mechanism.
+END
+ }
+ attr {
+ name: "dtype"
+ description: <<END
+The type of elements in the tensor.
+END
+ }
+ attr {
+ name: "shape"
+ description: <<END
+(Optional) The shape of the tensor. If the shape has 0 dimensions, the
+shape is unconstrained.
+END
+ }
+ summary: "A placeholder op for a value that will be fed into the computation."
+ description: <<END
+N.B. This operation will fail with an error if it is executed. It is
+intended as a way to represent a value that will always be fed, and to
+provide attrs that enable the fed value to be checked at runtime.
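+
+A minimal TF 1.x-style sketch, assuming the `tf.placeholder` wrapper:
+
+```python
+import tensorflow as tf
+
+x = tf.placeholder(tf.float32, shape=(None, 3))
+y = x * 2.0
+with tf.Session() as sess:
+  print(sess.run(y, feed_dict={x: [[1.0, 2.0, 3.0]]}))  # [[2. 4. 6.]]
+```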
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_PlaceholderV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_PlaceholderV2.pbtxt
new file mode 100644
index 0000000000..c67f6e12e0
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_PlaceholderV2.pbtxt
@@ -0,0 +1,28 @@
+op {
+ graph_op_name: "PlaceholderV2"
+ out_arg {
+ name: "output"
+ description: <<END
+A placeholder tensor that must be replaced using the feed mechanism.
+END
+ }
+ attr {
+ name: "dtype"
+ description: <<END
+The type of elements in the tensor.
+END
+ }
+ attr {
+ name: "shape"
+ description: <<END
+The shape of the tensor. The shape can be any partially-specified
+shape. To be unconstrained, pass in a shape with unknown rank.
+END
+ }
+ summary: "A placeholder op for a value that will be fed into the computation."
+ description: <<END
+N.B. This operation will fail with an error if it is executed. It is
+intended as a way to represent a value that will always be fed, and to
+provide attrs that enable the fed value to be checked at runtime.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_PlaceholderWithDefault.pbtxt b/tensorflow/core/api_def/base_api/api_def_PlaceholderWithDefault.pbtxt
new file mode 100644
index 0000000000..c20383faf5
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_PlaceholderWithDefault.pbtxt
@@ -0,0 +1,28 @@
+op {
+ graph_op_name: "PlaceholderWithDefault"
+ in_arg {
+ name: "input"
+ description: <<END
+The default value to produce when `output` is not fed.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+A placeholder tensor that defaults to `input` if it is not fed.
+END
+ }
+ attr {
+ name: "dtype"
+ description: <<END
+The type of elements in the tensor.
+END
+ }
+ attr {
+ name: "shape"
+ description: <<END
+The (possibly partial) shape of the tensor.
+END
+ }
+ summary: "A placeholder op that passes through `input` when its output is not fed."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Polygamma.pbtxt b/tensorflow/core/api_def/base_api/api_def_Polygamma.pbtxt
new file mode 100644
index 0000000000..10bf370f54
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Polygamma.pbtxt
@@ -0,0 +1,12 @@
+op {
+ graph_op_name: "Polygamma"
+ summary: "Compute the polygamma function \\\\(\\psi^{(n)}(x)\\\\)."
+ description: <<END
+The polygamma function is defined as:
+
+\\(\psi^{(n)}(x) = \frac{d^n}{dx^n} \psi(x)\\)
+
+where \\(\psi(x)\\) is the digamma function.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_PopulationCount.pbtxt b/tensorflow/core/api_def/base_api/api_def_PopulationCount.pbtxt
new file mode 100644
index 0000000000..97b106cd35
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_PopulationCount.pbtxt
@@ -0,0 +1,12 @@
+op {
+ graph_op_name: "PopulationCount"
+ summary: "Computes element-wise population count (a.k.a. popcount, bitsum, bitcount)."
+ description: <<END
+For each entry in `x`, calculates the number of `1` (on) bits in the binary
+representation of that entry.
+
+**NOTE**: It is more efficient to first `tf.bitcast` your tensors into
+`int32` or `int64` and perform the bitcount on the result, than to feed in
+8- or 16-bit inputs and then aggregate the resulting counts.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Pow.pbtxt b/tensorflow/core/api_def/base_api/api_def_Pow.pbtxt
new file mode 100644
index 0000000000..8ace5f3100
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Pow.pbtxt
@@ -0,0 +1,14 @@
+op {
+ graph_op_name: "Pow"
+ summary: "Computes the power of one value to another."
+ description: <<END
+Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
+corresponding elements in `x` and `y`. For example:
+
+```
+# tensor 'x' is [[2, 2], [3, 3]]
+# tensor 'y' is [[8, 16], [2, 3]]
+tf.pow(x, y) ==> [[256, 65536], [9, 27]]
+```
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_PrefetchDataset.pbtxt b/tensorflow/core/api_def/base_api/api_def_PrefetchDataset.pbtxt
new file mode 100644
index 0000000000..e158eedc6f
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_PrefetchDataset.pbtxt
@@ -0,0 +1,11 @@
+op {
+ graph_op_name: "PrefetchDataset"
+ in_arg {
+ name: "buffer_size"
+ description: <<END
+The maximum number of elements to buffer in an iterator over
+this dataset.
+END
+ }
+ summary: "Creates a dataset that asynchronously prefetches elements from `input_dataset`."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_PreventGradient.pbtxt b/tensorflow/core/api_def/base_api/api_def_PreventGradient.pbtxt
new file mode 100644
index 0000000000..6332192fb7
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_PreventGradient.pbtxt
@@ -0,0 +1,32 @@
+op {
+ graph_op_name: "PreventGradient"
+ in_arg {
+ name: "input"
+ description: <<END
+any tensor.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+the same input tensor.
+END
+ }
+ attr {
+ name: "message"
+ description: <<END
+Will be printed in the error when anyone tries to differentiate
+this operation.
+END
+ }
+ summary: "An identity op that triggers an error if a gradient is requested."
+ description: <<END
+When executed in a graph, this op outputs its input tensor as-is.
+
+When building ops to compute gradients, the TensorFlow gradient system
+will return an error when trying to look up the gradient of this op,
+because no gradient must ever be registered for this function. This
+op exists to prevent subtle bugs from silently returning unimplemented
+gradients in some corner cases.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Print.pbtxt b/tensorflow/core/api_def/base_api/api_def_Print.pbtxt
new file mode 100644
index 0000000000..effbde1623
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Print.pbtxt
@@ -0,0 +1,43 @@
+op {
+ graph_op_name: "Print"
+ in_arg {
+ name: "input"
+ description: <<END
+The tensor passed to `output`
+END
+ }
+ in_arg {
+ name: "data"
+ description: <<END
+A list of tensors to print out when op is evaluated.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+The unmodified `input` tensor
+END
+ }
+ attr {
+ name: "message"
+ description: <<END
+A string, prefix of the printed message.
+END
+ }
+ attr {
+ name: "first_n"
+ description: <<END
+Only log the first `first_n` times; -1 disables logging.
+END
+ }
+ attr {
+ name: "summarize"
+ description: <<END
+Only print this many entries of each tensor.
+END
+ }
+ summary: "Prints a list of tensors."
+ description: <<END
+Passes `input` through to `output` and prints `data` when evaluating.
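+
+A minimal TF 1.x-style sketch, assuming the `tf.Print` wrapper:
+
+```python
+import tensorflow as tf
+
+x = tf.constant([1.0, 2.0, 3.0])
+x = tf.Print(x, [x], message="x is: ", first_n=3, summarize=10)
+with tf.Session() as sess:
+  sess.run(x)  # logs "x is: [1 2 3]" to stderr; the value is unchanged
+```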
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_PriorityQueue.pbtxt b/tensorflow/core/api_def/base_api/api_def_PriorityQueue.pbtxt
new file mode 100644
index 0000000000..6cbcef11f8
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_PriorityQueue.pbtxt
@@ -0,0 +1,54 @@
+op {
+ graph_op_name: "PriorityQueue"
+ visibility: SKIP
+ out_arg {
+ name: "handle"
+ description: <<END
+The handle to the queue.
+END
+ }
+ attr {
+ name: "component_types"
+ description: <<END
+The type of each component in a value.
+END
+ }
+ attr {
+ name: "shapes"
+ description: <<END
+The shape of each component in a value. The length of this attr must
+be either 0 or the same as the length of component_types. If the length of
+this attr is 0, the shapes of queue elements are not constrained, and
+only one element may be dequeued at a time.
+END
+ }
+ attr {
+ name: "capacity"
+ description: <<END
+The upper bound on the number of elements in this queue.
+Negative numbers mean no limit.
+END
+ }
+ attr {
+ name: "container"
+ description: <<END
+If non-empty, this queue is placed in the given container.
+Otherwise, a default container is used.
+END
+ }
+ attr {
+ name: "shared_name"
+ description: <<END
+If non-empty, this queue will be shared under the given name
+across multiple sessions.
+END
+ }
+ summary: "A queue that produces elements sorted by the first component value."
+ description: <<END
+Note that the PriorityQueue requires the first component of any element
+to be a scalar int64, in addition to the other elements declared by
+component_types. Therefore calls to Enqueue and EnqueueMany (resp. Dequeue
+and DequeueMany) on a PriorityQueue will all require (resp. output) one extra
+entry in their input (resp. output) lists.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_PriorityQueueV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_PriorityQueueV2.pbtxt
new file mode 100644
index 0000000000..f0c1499e39
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_PriorityQueueV2.pbtxt
@@ -0,0 +1,56 @@
+op {
+ graph_op_name: "PriorityQueueV2"
+ endpoint {
+ name: "PriorityQueue"
+ }
+ out_arg {
+ name: "handle"
+ description: <<END
+The handle to the queue.
+END
+ }
+ attr {
+ name: "component_types"
+ description: <<END
+The type of each component in a value.
+END
+ }
+ attr {
+ name: "shapes"
+ description: <<END
+The shape of each component in a value. The length of this attr must
+be either 0 or the same as the length of component_types. If the length of
+this attr is 0, the shapes of queue elements are not constrained, and
+only one element may be dequeued at a time.
+END
+ }
+ attr {
+ name: "capacity"
+ description: <<END
+The upper bound on the number of elements in this queue.
+Negative numbers mean no limit.
+END
+ }
+ attr {
+ name: "container"
+ description: <<END
+If non-empty, this queue is placed in the given container.
+Otherwise, a default container is used.
+END
+ }
+ attr {
+ name: "shared_name"
+ description: <<END
+If non-empty, this queue will be shared under the given name
+across multiple sessions.
+END
+ }
+ summary: "A queue that produces elements sorted by the first component value."
+ description: <<END
+Note that the PriorityQueue requires the first component of any element
+to be a scalar int64, in addition to the other elements declared by
+component_types. Therefore calls to Enqueue and EnqueueMany (resp. Dequeue
+and DequeueMany) on a PriorityQueue will all require (resp. output) one extra
+entry in their input (resp. output) lists.
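+
+A rough TF 1.x-style sketch, assuming the `tf.PriorityQueue` wrapper and that
+enqueue tuples lead with the scalar int64 priority, as described above:
+
+```python
+import tensorflow as tf
+
+q = tf.PriorityQueue(capacity=10, types=(tf.string,), shapes=((),))
+with tf.Session() as sess:
+  sess.run(q.enqueue((tf.constant(2, tf.int64), "second")))
+  sess.run(q.enqueue((tf.constant(1, tf.int64), "first")))
+  print(sess.run(q.dequeue()))  # the priority-1 element is dequeued first
+```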
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Prod.pbtxt b/tensorflow/core/api_def/base_api/api_def_Prod.pbtxt
new file mode 100644
index 0000000000..02b6e425f8
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Prod.pbtxt
@@ -0,0 +1,42 @@
+op {
+ graph_op_name: "Prod"
+ endpoint {
+ name: "Prod"
+ }
+ endpoint {
+ name: "ReduceProd"
+ }
+ in_arg {
+ name: "input"
+ description: <<END
+The tensor to reduce.
+END
+ }
+ in_arg {
+ name: "reduction_indices"
+ rename_to: "axis"
+ description: <<END
+The dimensions to reduce. Must be in the range
+`[-rank(input), rank(input))`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+The reduced tensor.
+END
+ }
+ attr {
+ name: "keep_dims"
+ description: <<END
+If true, retain reduced dimensions with length 1.
+END
+ }
+ summary: "Computes the product of elements across dimensions of a tensor."
+ description: <<END
+Reduces `input` along the dimensions given in `reduction_indices`. Unless
+`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
+`reduction_indices`. If `keep_dims` is true, the reduced dimensions are
+retained with length 1.
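+
+A minimal sketch, assuming the `tf.reduce_prod` wrapper, which maps to this
+op:
+
+```python
+import tensorflow as tf
+
+x = tf.constant([[1, 2], [3, 4]])
+tf.reduce_prod(x)                          # 24
+tf.reduce_prod(x, axis=0)                  # [3, 8]
+tf.reduce_prod(x, axis=1, keep_dims=True)  # [[2], [12]]
+```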
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_PyFunc.pbtxt b/tensorflow/core/api_def/base_api/api_def_PyFunc.pbtxt
new file mode 100644
index 0000000000..4b8bcf5e12
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_PyFunc.pbtxt
@@ -0,0 +1,40 @@
+op {
+ graph_op_name: "PyFunc"
+ visibility: SKIP
+ in_arg {
+ name: "input"
+ description: <<END
+List of Tensors that will provide input to the Op.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+The outputs from the Op.
+END
+ }
+ attr {
+ name: "token"
+ description: <<END
+A token representing a registered python function in this address space.
+END
+ }
+ attr {
+ name: "Tin"
+ description: <<END
+Data types of the inputs to the op.
+END
+ }
+ attr {
+ name: "Tout"
+ description: <<END
+Data types of the outputs from the op.
+The length of the list specifies the number of outputs.
+END
+ }
+ summary: "Invokes a python function to compute func(input)->output."
+ description: <<END
+This operation is considered stateful. For a stateless version, see
+PyFuncStateless.
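+
+A minimal TF 1.x-style sketch, assuming the `tf.py_func` wrapper:
+
+```python
+import tensorflow as tf
+import numpy as np
+
+def square(x):
+  # Receives and returns numpy arrays.
+  return np.square(x)
+
+x = tf.constant([1.0, 2.0])
+y = tf.py_func(square, [x], tf.float32)  # stateful=True by default
+```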
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_PyFuncStateless.pbtxt b/tensorflow/core/api_def/base_api/api_def_PyFuncStateless.pbtxt
new file mode 100644
index 0000000000..1296292862
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_PyFuncStateless.pbtxt
@@ -0,0 +1,5 @@
+op {
+ graph_op_name: "PyFuncStateless"
+ visibility: SKIP
+ summary: "A stateless version of PyFunc."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Q.pbtxt b/tensorflow/core/api_def/base_api/api_def_Q.pbtxt
deleted file mode 100644
index 4af60a1841..0000000000
--- a/tensorflow/core/api_def/base_api/api_def_Q.pbtxt
+++ /dev/null
@@ -1,609 +0,0 @@
-op {
- graph_op_name: "Qr"
- endpoint {
- name: "Qr"
- }
- summary: "Computes the QR decompositions of one or more matrices."
- description: <<END
-Computes the QR decomposition of each inner matrix in `tensor` such that
-`tensor[..., :, :] = q[..., :, :] * r[..., :, :]`
-
-```python
-# a is a tensor.
-# q is a tensor of orthonormal matrices.
-# r is a tensor of upper triangular matrices.
-q, r = qr(a)
-q_full, r_full = qr(a, full_matrices=True)
-```
-END
-}
-op {
- graph_op_name: "QuantizeAndDequantize"
- endpoint {
- name: "QuantizeAndDequantize"
- }
- summary: "Use QuantizeAndDequantizeV2 instead."
-}
-op {
- graph_op_name: "QuantizeAndDequantizeV2"
- endpoint {
- name: "QuantizeAndDequantizeV2"
- }
- summary: "Quantizes then dequantizes a tensor."
- description: <<END
-This op simulates the precision loss from the quantized forward pass by:
-1. Quantizing the tensor to fixed point numbers, which should match the target
- quantization method when it is used in inference.
-2. Dequantizing it back to floating point numbers for the following ops, most
- likely matmul.
-
-There are different ways to quantize. This version does not use the full range
-of the output type, choosing to elide the lowest possible value for symmetry
-(e.g., output range is -127 to 127, not -128 to 127 for signed 8 bit
-quantization), so that 0.0 maps to 0.
-
-To perform this op, we first find the range of values in our tensor. The range
-we use is always centered on 0, so we find m such that
-
-1. m = max(abs(input_min), abs(input_max)) if range_given is true,
-2. m = max(abs(min_elem(input)), abs(max_elem(input))) otherwise.
-
-Our input tensor range is then [-m, m].
-
-Next, we choose our fixed-point quantization buckets, [min_fixed, max_fixed].
-If signed_input is true, this is
-
-  [min_fixed, max_fixed] =
-      [-((1 << (num_bits - 1)) - 1), (1 << (num_bits - 1)) - 1].
-
-Otherwise, if signed_input is false, the fixed-point range is
-
- [min_fixed, max_fixed] = [0, (1 << num_bits) - 1].
-
-From this we compute our scaling factor, s:
-
- s = (max_fixed - min_fixed) / (2 * m).
-
-Now we can quantize and dequantize the elements of our tensor. An element e
-is transformed into e':
-
- e' = (e * s).round_to_nearest() / s.
-
-Note that we have a different number of buckets in the signed vs. unsigned
-cases. For example, if num_bits == 8, we get 254 buckets in the signed case
-vs. 255 in the unsigned case.
-
-For example, suppose num_bits = 8 and m = 1. Then
-
- [min_fixed, max_fixed] = [-127, 127], and
- s = (127 + 127) / 2 = 127.
-
-Given the vector {-1, -0.5, 0, 0.3}, this is quantized to
-{-127, -63, 0, 38}, and dequantized to {-1, -63.0/127, 0, 38.0/127}.
-END
-}
-op {
- graph_op_name: "QuantizeAndDequantizeV3"
- endpoint {
- name: "QuantizeAndDequantizeV3"
- }
- summary: "Quantizes then dequantizes a tensor."
- description: <<END
-This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a
-tensor, so its value can change during training.
-END
-}
-op {
- graph_op_name: "QuantizeDownAndShrinkRange"
- endpoint {
- name: "QuantizeDownAndShrinkRange"
- }
- summary: "Convert the quantized \'input\' tensor into a lower-precision \'output\', using the"
- description: <<END
-actual distribution of the values to maximize the usage of the lower bit depth
-and adjusting the output min and max ranges accordingly.
-
-[input_min, input_max] are scalar floats that specify the range for the float
-interpretation of the 'input' data. For example, if input_min is -1.0f and
-input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0
-value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.
-
-This operator tries to squeeze as much precision as possible into an output with
-a lower bit depth by calculating the actual min and max values found in the
-data. For example, maybe that quint16 input has no values lower than 16,384 and
-none higher than 49,152. That means only half the range is actually needed, all
-the float interpretations are between -0.5f and 0.5f, so if we want to compress
-the data into a quint8 output, we can use that range rather than the theoretical
--1.0f to 1.0f that is suggested by the input min and max.
-
-In practice, this is most useful for taking output from operations like
-QuantizedMatMul that can produce higher bit-depth outputs than their inputs and
-may have large potential output ranges, but in practice have a distribution of
-input values that only uses a small fraction of the possible range. By feeding
-that output into this operator, we can reduce it from 32 bits down to 8 with
-minimal loss of accuracy.
-END
-}
-op {
- graph_op_name: "QuantizeV2"
- endpoint {
- name: "QuantizeV2"
- }
- summary: "Quantize the \'input\' tensor of type float to \'output\' tensor of type \'T\'."
- description: <<END
-[min_range, max_range] are scalar floats that specify the range for
-the 'input' data. The 'mode' attribute controls exactly which calculations are
-used to convert the float values to their quantized equivalents.
-
-In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:
-
-```
-out[i] = (in[i] - min_range) * range(T) / (max_range - min_range)
-if T == qint8, out[i] -= (range(T) + 1) / 2.0
-```
-here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`
-
-*MIN_COMBINED Mode Example*
-
-Assume the input is type float and has a possible range of [0.0, 6.0] and the
-output type is quint8 ([0, 255]). The min_range and max_range values should be
-specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each
-value of the input by 255/6 and cast to quint8.
-
-If the output type was qint8 ([-128, 127]), the operation will additionally
-subtract each value by 128 prior to casting, so that the range of values aligns
-with the range of qint8.
-
-If the mode is 'MIN_FIRST', then this approach is used:
-
-```
-number_of_steps = 1 << (# of bits in T)
-range_adjust = number_of_steps / (number_of_steps - 1)
-range = (range_max - range_min) * range_adjust
-range_scale = number_of_steps / range
-quantized = round(input * range_scale) - round(range_min * range_scale) +
- numeric_limits<T>::min()
-quantized = max(quantized, numeric_limits<T>::min())
-quantized = min(quantized, numeric_limits<T>::max())
-```
-
-The biggest difference between this and MIN_COMBINED is that the minimum range
-is rounded first, before it's subtracted from the rounded value. With
-MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing
-and dequantizing will introduce a larger and larger error.
-
-*SCALED mode Example*
-
-`SCALED` mode matches the quantization approach used in
-`QuantizeAndDequantize{V2|V3}`.
-
-If the mode is `SCALED`, we do not use the full range of the output type,
-choosing to elide the lowest possible value for symmetry (e.g., output range is
--127 to 127, not -128 to 127 for signed 8 bit quantization), so that 0.0 maps to
-0.
-
-We first find the range of values in our tensor. The
-range we use is always centered on 0, so we find m such that
-```c++
- m = max(abs(input_min), abs(input_max))
-```
-
-Our input tensor range is then `[-m, m]`.
-
-Next, we choose our fixed-point quantization buckets, `[min_fixed, max_fixed]`.
-If T is signed, this is
-```
- num_bits = sizeof(T) * 8
- [min_fixed, max_fixed] =
-      [-((1 << (num_bits - 1)) - 1), (1 << (num_bits - 1)) - 1]
-```
-
-Otherwise, if T is unsigned, the fixed-point range is
-```
- [min_fixed, max_fixed] = [0, (1 << num_bits) - 1]
-```
-
-From this we compute our scaling factor, s:
-```c++
- s = (max_fixed - min_fixed) / (2 * m)
-```
-
-Now we can quantize the elements of our tensor:
-```c++
-result = (input * s).round_to_nearest()
-```
-
-One thing to watch out for is that the operator may choose to adjust the
-requested minimum and maximum values slightly during the quantization process,
-so you should always use the output ports as the range for further calculations.
-For example, if the requested minimum and maximum values are close to equal,
-they will be separated by a small epsilon value to prevent ill-formed quantized
-buffers from being created. Otherwise, you can end up with buffers where all the
-quantized values map to the same float value, which causes problems for
-operations that have to perform further calculations on them.
-END
-}
-op {
- graph_op_name: "QuantizedAdd"
- endpoint {
- name: "QuantizedAdd"
- }
- summary: "Returns x + y element-wise, working on quantized buffers."
-}
-op {
- graph_op_name: "QuantizedAvgPool"
- endpoint {
- name: "QuantizedAvgPool"
- }
- summary: "Produces the average pool of the input tensor for quantized types."
-}
-op {
- graph_op_name: "QuantizedBatchNormWithGlobalNormalization"
- endpoint {
- name: "QuantizedBatchNormWithGlobalNormalization"
- }
- summary: "Quantized Batch normalization."
- description: <<END
-This op is deprecated and will be removed in the future. Prefer
-`tf.nn.batch_normalization`.
-END
-}
-op {
- graph_op_name: "QuantizedBiasAdd"
- endpoint {
- name: "QuantizedBiasAdd"
- }
- summary: "Adds Tensor \'bias\' to Tensor \'input\' for Quantized types."
- description: <<END
-Broadcasts the values of bias on dimensions 0..N-2 of 'input'.
-END
-}
-op {
- graph_op_name: "QuantizedConcat"
- endpoint {
- name: "QuantizedConcat"
- }
- summary: "Concatenates quantized tensors along one dimension."
-}
-op {
- graph_op_name: "QuantizedConv2D"
- endpoint {
- name: "QuantizedConv2D"
- }
- summary: "Computes a 2D convolution given quantized 4D input and filter tensors."
- description: <<END
-The inputs are quantized tensors where the lowest value represents the real
-number of the associated minimum, and the highest represents the maximum.
-This means that you can only interpret the quantized output in the same way, by
-taking the returned minimum and maximum values into account.
-END
-}
-op {
- graph_op_name: "QuantizedInstanceNorm"
- endpoint {
- name: "QuantizedInstanceNorm"
- }
- summary: "Quantized Instance normalization."
-}
-op {
- graph_op_name: "QuantizedMatMul"
- endpoint {
- name: "QuantizedMatMul"
- }
- summary: "Perform a quantized matrix multiplication of `a` by the matrix `b`."
- description: <<END
-The inputs must be two-dimensional matrices and the inner dimension of
-`a` (after being transposed if `transpose_a` is non-zero) must match the
-outer dimension of `b` (after being transposed if `transpose_b` is
-non-zero).
-END
-}
-op {
- graph_op_name: "QuantizedMaxPool"
- endpoint {
- name: "QuantizedMaxPool"
- }
- summary: "Produces the max pool of the input tensor for quantized types."
-}
-op {
- graph_op_name: "QuantizedMul"
- endpoint {
- name: "QuantizedMul"
- }
- summary: "Returns x * y element-wise, working on quantized buffers."
-}
-op {
- graph_op_name: "QuantizedRelu"
- endpoint {
- name: "QuantizedRelu"
- }
- summary: "Computes Quantized Rectified Linear: `max(features, 0)`"
-}
-op {
- graph_op_name: "QuantizedRelu6"
- endpoint {
- name: "QuantizedRelu6"
- }
- summary: "Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)`"
-}
-op {
- graph_op_name: "QuantizedReluX"
- endpoint {
- name: "QuantizedReluX"
- }
- summary: "Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)`"
-}
-op {
- graph_op_name: "QuantizedReshape"
- endpoint {
- name: "QuantizedReshape"
- }
- summary: "Reshapes a quantized tensor as per the Reshape op."
- description: <<END
-```
-END
-}
-op {
- graph_op_name: "QuantizedResizeBilinear"
- endpoint {
- name: "QuantizedResizeBilinear"
- }
- summary: "Resize quantized `images` to `size` using quantized bilinear interpolation."
- description: <<END
-Input images and output images must be quantized types.
-END
-}
-op {
- graph_op_name: "QueueClose"
- endpoint {
- name: "QueueClose"
- }
- summary: "Closes the given queue."
- description: <<END
-This operation signals that no more elements will be enqueued in the
-given queue. Subsequent Enqueue(Many) operations will fail.
-Subsequent Dequeue(Many) operations will continue to succeed if
-sufficient elements remain in the queue. Subsequent Dequeue(Many)
-operations that would block will fail immediately.
-END
-}
-op {
- graph_op_name: "QueueCloseV2"
- endpoint {
- name: "QueueCloseV2"
- }
- summary: "Closes the given queue."
- description: <<END
-This operation signals that no more elements will be enqueued in the
-given queue. Subsequent Enqueue(Many) operations will fail.
-Subsequent Dequeue(Many) operations will continue to succeed if
-sufficient elements remain in the queue. Subsequent Dequeue(Many)
-operations that would block will fail immediately.
-END
-}
-op {
- graph_op_name: "QueueDequeue"
- endpoint {
- name: "QueueDequeue"
- }
- summary: "Dequeues a tuple of one or more tensors from the given queue."
- description: <<END
-This operation has k outputs, where k is the number of components
-in the tuples stored in the given queue, and output i is the ith
-component of the dequeued tuple.
-
-N.B. If the queue is empty, this operation will block until an element
-has been dequeued (or 'timeout_ms' elapses, if specified).
-END
-}
-op {
- graph_op_name: "QueueDequeueMany"
- endpoint {
- name: "QueueDequeueMany"
- }
- summary: "Dequeues `n` tuples of one or more tensors from the given queue."
- description: <<END
-If the queue is closed and there are fewer than `n` elements, then an
-OutOfRange error is returned.
-
-This operation concatenates queue-element component tensors along the
-0th dimension to make a single component tensor. All of the components
-in the dequeued tuple will have size `n` in the 0th dimension.
-
-This operation has `k` outputs, where `k` is the number of components in
-the tuples stored in the given queue, and output `i` is the ith
-component of the dequeued tuple.
-
-N.B. If the queue is empty, this operation will block until `n` elements
-have been dequeued (or 'timeout_ms' elapses, if specified).
-END
-}
-op {
- graph_op_name: "QueueDequeueManyV2"
- endpoint {
- name: "QueueDequeueManyV2"
- }
- summary: "Dequeues `n` tuples of one or more tensors from the given queue."
- description: <<END
-If the queue is closed and there are fewer than `n` elements, then an
-OutOfRange error is returned.
-
-This operation concatenates queue-element component tensors along the
-0th dimension to make a single component tensor. All of the components
-in the dequeued tuple will have size `n` in the 0th dimension.
-
-This operation has `k` outputs, where `k` is the number of components in
-the tuples stored in the given queue, and output `i` is the ith
-component of the dequeued tuple.
-
-N.B. If the queue is empty, this operation will block until `n` elements
-have been dequeued (or 'timeout_ms' elapses, if specified).
-END
-}
-op {
- graph_op_name: "QueueDequeueUpTo"
- endpoint {
- name: "QueueDequeueUpTo"
- }
- summary: "Dequeues `n` tuples of one or more tensors from the given queue."
- description: <<END
-This operation is not supported by all queues. If a queue does not support
-DequeueUpTo, then an Unimplemented error is returned.
-
-If the queue is closed and there are more than 0 but less than `n`
-elements remaining, then instead of returning an OutOfRange error like
-QueueDequeueMany, less than `n` elements are returned immediately. If
-the queue is closed and there are 0 elements left in the queue, then
-an OutOfRange error is returned just like in QueueDequeueMany.
-Otherwise the behavior is identical to QueueDequeueMany:
-
-This operation concatenates queue-element component tensors along the
-0th dimension to make a single component tensor. All of the components
-in the dequeued tuple will have size `n` in the 0th dimension.
-
-This operation has k outputs, where `k` is the number of components in
-the tuples stored in the given queue, and output `i` is the ith
-component of the dequeued tuple.
-END
-}
-op {
- graph_op_name: "QueueDequeueUpToV2"
- endpoint {
- name: "QueueDequeueUpToV2"
- }
- summary: "Dequeues `n` tuples of one or more tensors from the given queue."
- description: <<END
-This operation is not supported by all queues. If a queue does not support
-DequeueUpTo, then an Unimplemented error is returned.
-
-If the queue is closed and there are more than 0 but less than `n`
-elements remaining, then instead of returning an OutOfRange error like
-QueueDequeueMany, less than `n` elements are returned immediately. If
-the queue is closed and there are 0 elements left in the queue, then
-an OutOfRange error is returned just like in QueueDequeueMany.
-Otherwise the behavior is identical to QueueDequeueMany:
-
-This operation concatenates queue-element component tensors along the
-0th dimension to make a single component tensor. All of the components
-in the dequeued tuple will have size n in the 0th dimension.
-
-This operation has `k` outputs, where `k` is the number of components in
-the tuples stored in the given queue, and output `i` is the ith
-component of the dequeued tuple.
-END
-}
-op {
- graph_op_name: "QueueDequeueV2"
- endpoint {
- name: "QueueDequeueV2"
- }
- summary: "Dequeues a tuple of one or more tensors from the given queue."
- description: <<END
-This operation has k outputs, where k is the number of components
-in the tuples stored in the given queue, and output i is the ith
-component of the dequeued tuple.
-
-N.B. If the queue is empty, this operation will block until an element
-has been dequeued (or 'timeout_ms' elapses, if specified).
-END
-}
-op {
- graph_op_name: "QueueEnqueue"
- endpoint {
- name: "QueueEnqueue"
- }
- summary: "Enqueues a tuple of one or more tensors in the given queue."
- description: <<END
-The components input has k elements, which correspond to the components of
-tuples stored in the given queue.
-
-N.B. If the queue is full, this operation will block until the given
-element has been enqueued (or 'timeout_ms' elapses, if specified).
-END
-}
-op {
- graph_op_name: "QueueEnqueueMany"
- endpoint {
- name: "QueueEnqueueMany"
- }
- summary: "Enqueues zero or more tuples of one or more tensors in the given queue."
- description: <<END
-This operation slices each component tensor along the 0th dimension to
-make multiple queue elements. All of the tuple components must have the
-same size in the 0th dimension.
-
-The components input has k elements, which correspond to the components of
-tuples stored in the given queue.
-
-N.B. If the queue is full, this operation will block until the given
-elements have been enqueued (or 'timeout_ms' elapses, if specified).
-END
-}
-op {
- graph_op_name: "QueueEnqueueManyV2"
- endpoint {
- name: "QueueEnqueueManyV2"
- }
- summary: "Enqueues zero or more tuples of one or more tensors in the given queue."
- description: <<END
-This operation slices each component tensor along the 0th dimension to
-make multiple queue elements. All of the tuple components must have the
-same size in the 0th dimension.
-
-The components input has k elements, which correspond to the components of
-tuples stored in the given queue.
-
-N.B. If the queue is full, this operation will block until the given
-elements have been enqueued (or 'timeout_ms' elapses, if specified).
-END
-}
-op {
- graph_op_name: "QueueEnqueueV2"
- endpoint {
- name: "QueueEnqueueV2"
- }
- summary: "Enqueues a tuple of one or more tensors in the given queue."
- description: <<END
-The components input has k elements, which correspond to the components of
-tuples stored in the given queue.
-
-N.B. If the queue is full, this operation will block until the given
-element has been enqueued (or 'timeout_ms' elapses, if specified).
-END
-}
-op {
- graph_op_name: "QueueIsClosed"
- endpoint {
- name: "QueueIsClosed"
- }
- summary: "Returns true if queue is closed."
- description: <<END
-This operation returns true if the queue is closed and false if the queue
-is open.
-END
-}
-op {
- graph_op_name: "QueueIsClosedV2"
- endpoint {
- name: "QueueIsClosedV2"
- }
- summary: "Returns true if queue is closed."
- description: <<END
-This operation returns true if the queue is closed and false if the queue
-is open.
-END
-}
-op {
- graph_op_name: "QueueSize"
- endpoint {
- name: "QueueSize"
- }
- summary: "Computes the number of elements in the given queue."
-}
-op {
- graph_op_name: "QueueSizeV2"
- endpoint {
- name: "QueueSizeV2"
- }
- summary: "Computes the number of elements in the given queue."
-}
diff --git a/tensorflow/core/api_def/base_api/api_def_Qr.pbtxt b/tensorflow/core/api_def/base_api/api_def_Qr.pbtxt
new file mode 100644
index 0000000000..ac8f7597aa
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Qr.pbtxt
@@ -0,0 +1,45 @@
+op {
+ graph_op_name: "Qr"
+ in_arg {
+ name: "input"
+ description: <<END
+A tensor of shape `[..., M, N]` whose inner-most 2 dimensions
+form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.
+END
+ }
+ out_arg {
+ name: "q"
+ description: <<END
+Orthonormal basis for range of `a`. If `full_matrices` is `False` then
+shape is `[..., M, P]`; if `full_matrices` is `True` then shape is
+`[..., M, M]`.
+END
+ }
+ out_arg {
+ name: "r"
+ description: <<END
+Triangular factor. If `full_matrices` is `False` then shape is
+`[..., P, N]`. If `full_matrices` is `True` then shape is `[..., M, N]`.
+END
+ }
+ attr {
+ name: "full_matrices"
+ description: <<END
+If true, compute full-sized `q` and `r`. If false
+(the default), compute only the leading `P` columns of `q`.
+END
+ }
+ summary: "Computes the QR decompositions of one or more matrices."
+ description: <<END
+Computes the QR decomposition of each inner matrix in `tensor` such that
+`tensor[..., :, :] = q[..., :, :] * r[..., :, :]`
+
+```python
+# a is a tensor.
+# q is a tensor of orthonormal matrices.
+# r is a tensor of upper triangular matrices.
+q, r = qr(a)
+q_full, r_full = qr(a, full_matrices=True)
+```
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_QuantizeAndDequantize.pbtxt b/tensorflow/core/api_def/base_api/api_def_QuantizeAndDequantize.pbtxt
new file mode 100644
index 0000000000..8d84144d33
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_QuantizeAndDequantize.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "QuantizeAndDequantize"
+ summary: "Use QuantizeAndDequantizeV2 instead."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_QuantizeAndDequantizeV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_QuantizeAndDequantizeV2.pbtxt
new file mode 100644
index 0000000000..1fc9c9034a
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_QuantizeAndDequantizeV2.pbtxt
@@ -0,0 +1,93 @@
+op {
+ graph_op_name: "QuantizeAndDequantizeV2"
+ in_arg {
+ name: "input"
+ description: <<END
+Tensor to quantize and then dequantize.
+END
+ }
+ in_arg {
+ name: "input_min"
+ description: <<END
+If `range_given` is true, this is the min of the range; otherwise this input
+is ignored.
+END
+ }
+ in_arg {
+ name: "input_max"
+ description: <<END
+If `range_given` is true, this is the max of the range; otherwise this input
+is ignored.
+END
+ }
+ attr {
+ name: "signed_input"
+ description: <<END
+Whether the quantization is signed or unsigned.
+END
+ }
+ attr {
+ name: "num_bits"
+ description: <<END
+The bitwidth of the quantization.
+END
+ }
+ attr {
+ name: "range_given"
+ description: <<END
+Whether the range is given or should be computed from the tensor.
+END
+ }
+ summary: "Quantizes then dequantizes a tensor."
+ description: <<END
+This op simulates the precision loss from the quantized forward pass by:
+1. Quantizing the tensor to fixed point numbers, which should match the target
+ quantization method when it is used in inference.
+2. Dequantizing it back to floating point numbers for the following ops, most
+ likely matmul.
+
+There are different ways to quantize. This version does not use the full range
+of the output type, choosing to elide the lowest possible value for symmetry
+(e.g., output range is -127 to 127, not -128 to 127 for signed 8 bit
+quantization), so that 0.0 maps to 0.
+
+To perform this op, we first find the range of values in our tensor. The range
+we use is always centered on 0, so we find m such that
+
+1. m = max(abs(input_min), abs(input_max)) if range_given is true,
+2. m = max(abs(min_elem(input)), abs(max_elem(input))) otherwise.
+
+Our input tensor range is then [-m, m].
+
+Next, we choose our fixed-point quantization buckets, [min_fixed, max_fixed].
+If signed_input is true, this is
+
+  [min_fixed, max_fixed] =
+    [-((1 << (num_bits - 1)) - 1), (1 << (num_bits - 1)) - 1].
+
+Otherwise, if signed_input is false, the fixed-point range is
+
+ [min_fixed, max_fixed] = [0, (1 << num_bits) - 1].
+
+From this we compute our scaling factor, s:
+
+ s = (max_fixed - min_fixed) / (2 * m).
+
+Now we can quantize and dequantize the elements of our tensor. An element e
+is transformed into e':
+
+ e' = (e * s).round_to_nearest() / s.
+
+Note that we have a different number of buckets in the signed vs. unsigned
+cases. For example, if num_bits == 8, we get 254 buckets in the signed case
+vs. 255 in the unsigned case.
+
+For example, suppose num_bits = 8 and m = 1. Then
+
+ [min_fixed, max_fixed] = [-127, 127], and
+ s = (127 + 127) / 2 = 127.
+
+Given the vector {-1, -0.5, 0, 0.3}, this is quantized to
+{-127, -63, 0, 38}, and dequantized to {-1, -63.0/127, 0, 38.0/127}.
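+
+As an illustration only, the transformation above can be sketched in plain
+NumPy (the helper name here is hypothetical, not part of this op):
+
+```python
+import numpy as np
+
+def quantize_and_dequantize(x, num_bits=8, signed_input=True):
+  # The range is centered on 0: m = max(|min_elem|, |max_elem|).
+  m = max(abs(x.min()), abs(x.max()))
+  if signed_input:
+    # Elide the lowest value for symmetry, e.g. [-127, 127] for 8 bits.
+    min_fixed = -((1 << (num_bits - 1)) - 1)
+    max_fixed = (1 << (num_bits - 1)) - 1
+  else:
+    min_fixed, max_fixed = 0, (1 << num_bits) - 1
+  s = (max_fixed - min_fixed) / (2.0 * m)
+  # Round to nearest (ties broken upward here) and rescale back.
+  return np.floor(x * s + 0.5) / s
+
+# Reproduces the worked example: quantized values {-127, -63, 0, 38}.
+print(quantize_and_dequantize(np.array([-1.0, -0.5, 0.0, 0.3])))
+```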
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_QuantizeAndDequantizeV3.pbtxt b/tensorflow/core/api_def/base_api/api_def_QuantizeAndDequantizeV3.pbtxt
new file mode 100644
index 0000000000..57128a842a
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_QuantizeAndDequantizeV3.pbtxt
@@ -0,0 +1,8 @@
+op {
+ graph_op_name: "QuantizeAndDequantizeV3"
+ summary: "Quantizes then dequantizes a tensor."
+ description: <<END
+This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a
+tensor, so its value can change during training.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_QuantizeDownAndShrinkRange.pbtxt b/tensorflow/core/api_def/base_api/api_def_QuantizeDownAndShrinkRange.pbtxt
new file mode 100644
index 0000000000..af7729e238
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_QuantizeDownAndShrinkRange.pbtxt
@@ -0,0 +1,64 @@
+op {
+ graph_op_name: "QuantizeDownAndShrinkRange"
+ in_arg {
+ name: "input_min"
+ description: <<END
+The float value that the minimum quantized input value represents.
+END
+ }
+ in_arg {
+ name: "input_max"
+ description: <<END
+The float value that the maximum quantized input value represents.
+END
+ }
+ out_arg {
+ name: "output_min"
+ description: <<END
+The float value that the minimum quantized output value represents.
+END
+ }
+ out_arg {
+ name: "output_max"
+ description: <<END
+The float value that the maximum quantized output value represents.
+END
+ }
+ attr {
+ name: "Tinput"
+ description: <<END
+The type of the input.
+END
+ }
+ attr {
+ name: "out_type"
+ description: <<END
+The type of the output. Should be a lower bit depth than Tinput.
+END
+ }
+ summary: "Convert the quantized \'input\' tensor into a lower-precision \'output\', using the"
+ description: <<END
+actual distribution of the values to maximize the usage of the lower bit depth
+and adjusting the output min and max ranges accordingly.
+
+[input_min, input_max] are scalar floats that specify the range for the float
+interpretation of the 'input' data. For example, if input_min is -1.0f and
+input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0
+value in the 16-bit data should be interpreted as -1.0f, and 65535 as 1.0f.
+
+This operator tries to squeeze as much precision as possible into an output with
+a lower bit depth by calculating the actual min and max values found in the
+data. For example, maybe that quint16 input has no values lower than 16,384 and
+none higher than 49,152. That means only half the range is actually needed; all
+the float interpretations are between -0.5f and 0.5f, so if we want to compress
+the data into a quint8 output, we can use that range rather than the theoretical
+-1.0f to 1.0f that is suggested by the input min and max.
+
+In practice, this is most useful for taking output from operations like
+QuantizedMatMul that can produce higher bit-depth outputs than their inputs and
+may have large potential output ranges, but in practice have a distribution of
+input values that only uses a small fraction of the possible range. By feeding
+that output into this operator, we can reduce it from 32 bits down to 8 with
+minimal loss of accuracy.
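+
+A simplified NumPy sketch of the range-shrinking idea (illustrative only; the
+helper name is hypothetical and this is not the actual kernel):
+
+```python
+import numpy as np
+
+def shrink_range_to_uint8(floats):
+  # 'floats' stands for the float interpretation of the wider input.
+  # The new output range is simply the actual min/max found in the data.
+  output_min, output_max = float(floats.min()), float(floats.max())
+  scale = 255.0 / (output_max - output_min)
+  quantized = np.round((floats - output_min) * scale).astype(np.uint8)
+  return quantized, output_min, output_max
+
+# Data that only spans [-0.5, 0.5] keeps full 8-bit resolution over that
+# range instead of wasting codes on the theoretical [-1.0, 1.0].
+print(shrink_range_to_uint8(np.array([-0.5, 0.0, 0.5])))
+```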
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_QuantizeV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_QuantizeV2.pbtxt
new file mode 100644
index 0000000000..b9e75caf02
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_QuantizeV2.pbtxt
@@ -0,0 +1,128 @@
+op {
+ graph_op_name: "QuantizeV2"
+ in_arg {
+ name: "min_range"
+ description: <<END
+The minimum scalar value possibly produced for the input.
+END
+ }
+ in_arg {
+ name: "max_range"
+ description: <<END
+The maximum scalar value possibly produced for the input.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+The quantized data produced from the float input.
+END
+ }
+ out_arg {
+ name: "output_min"
+ description: <<END
+The actual minimum scalar value used for the output.
+END
+ }
+ out_arg {
+ name: "output_max"
+ description: <<END
+The actual maximum scalar value used for the output.
+END
+ }
+ summary: "Quantize the \'input\' tensor of type float to \'output\' tensor of type \'T\'."
+ description: <<END
+[min_range, max_range] are scalar floats that specify the range for
+the 'input' data. The 'mode' attribute controls exactly which calculations are
+used to convert the float values to their quantized equivalents. The
+'round_mode' attribute controls which rounding tie-breaking algorithm is used
+when rounding float values to their quantized equivalents.
+
+In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:
+
+```
+out[i] = (in[i] - min_range) * range(T) / (max_range - min_range)
+if T == qint8, out[i] -= (range(T) + 1) / 2.0
+```
+Here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`.
+
+*MIN_COMBINED Mode Example*
+
+Assume the input is type float and has a possible range of [0.0, 6.0] and the
+output type is quint8 ([0, 255]). The min_range and max_range values should be
+specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each
+value of the input by 255/6 and cast to quint8.
+
+If the output type is qint8 ([-128, 127]), the operation will additionally
+subtract 128 from each value prior to casting, so that the range of values
+aligns with the range of qint8.
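+
+For instance, a minimal NumPy sketch of the MIN_COMBINED formula for a quint8
+output (illustrative only):
+
+```python
+import numpy as np
+
+def min_combined_quint8(x, min_range, max_range):
+  # out[i] = (in[i] - min_range) * range(T) / (max_range - min_range)
+  scale = 255.0 / (max_range - min_range)
+  return np.round((x - min_range) * scale).astype(np.uint8)
+
+print(min_combined_quint8(np.array([0.0, 3.0, 6.0]), 0.0, 6.0))  # [0 128 255]
+```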
+
+If the mode is 'MIN_FIRST', then this approach is used:
+
+```
+num_discrete_values = 1 << (# of bits in T)
+range_adjust = num_discrete_values / (num_discrete_values - 1)
+range = (max_range - min_range) * range_adjust
+range_scale = num_discrete_values / range
+quantized = round(input * range_scale) - round(min_range * range_scale) +
+ numeric_limits<T>::min()
+quantized = max(quantized, numeric_limits<T>::min())
+quantized = min(quantized, numeric_limits<T>::max())
+```
+
+The biggest difference between this and MIN_COMBINED is that the minimum range
+is rounded first, before it is subtracted from the rounded value. With
+MIN_COMBINED, a small bias is introduced, and repeated iterations of quantizing
+and dequantizing will accumulate a larger and larger error.
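+
+Transcribing the MIN_FIRST formulas above into NumPy for a quint8 output
+(illustrative only):
+
+```python
+import numpy as np
+
+def min_first_quint8(x, min_range, max_range):
+  num_discrete_values = 256.0
+  range_adjust = num_discrete_values / (num_discrete_values - 1)
+  rng = (max_range - min_range) * range_adjust
+  range_scale = num_discrete_values / rng
+  # The minimum of the range is rounded separately, avoiding the
+  # accumulating bias described above.
+  q = np.round(x * range_scale) - np.round(min_range * range_scale)
+  return np.clip(q, 0, 255).astype(np.uint8)
+
+print(min_first_quint8(np.array([0.0, 3.0, 6.0]), 0.0, 6.0))  # [0 128 255]
+```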
+
+*SCALED mode Example*
+
+`SCALED` mode matches the quantization approach used in
+`QuantizeAndDequantize{V2|V3}`.
+
+If the mode is `SCALED`, we do not use the full range of the output type,
+choosing to elide the lowest possible value for symmetry (e.g., output range is
+-127 to 127, not -128 to 127 for signed 8 bit quantization), so that 0.0 maps to
+0.
+
+We first find the range of values in our tensor. The
+range we use is always centered on 0, so we find m such that
+```c++
+ m = max(abs(input_min), abs(input_max))
+```
+
+Our input tensor range is then `[-m, m]`.
+
+Next, we choose our fixed-point quantization buckets, `[min_fixed, max_fixed]`.
+If T is signed, this is
+```
+ num_bits = sizeof(T) * 8
+ [min_fixed, max_fixed] =
+ [-(1 << (num_bits - 1) - 1), (1 << (num_bits - 1)) - 1]
+```
+
+Otherwise, if T is unsigned, the fixed-point range is
+```
+ [min_fixed, max_fixed] = [0, (1 << num_bits) - 1]
+```
+
+From this we compute our scaling factor, s:
+```c++
+ s = (max_fixed - min_fixed) / (2 * m)
+```
+
+Now we can quantize the elements of our tensor:
+```c++
+result = round(input * s)
+```
+
+One thing to watch out for is that the operator may choose to adjust the
+requested minimum and maximum values slightly during the quantization process,
+so you should always use the output ports as the range for further calculations.
+For example, if the requested minimum and maximum values are close to equal,
+they will be separated by a small epsilon value to prevent ill-formed quantized
+buffers from being created. Otherwise, you can end up with buffers where all the
+quantized values map to the same float value, which causes problems for
+operations that have to perform further calculations on them.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_QuantizedAdd.pbtxt b/tensorflow/core/api_def/base_api/api_def_QuantizedAdd.pbtxt
new file mode 100644
index 0000000000..193bee4db9
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_QuantizedAdd.pbtxt
@@ -0,0 +1,43 @@
+op {
+ graph_op_name: "QuantizedAdd"
+ in_arg {
+ name: "min_x"
+ description: <<END
+The float value that the lowest quantized `x` value represents.
+END
+ }
+ in_arg {
+ name: "max_x"
+ description: <<END
+The float value that the highest quantized `x` value represents.
+END
+ }
+ in_arg {
+ name: "min_y"
+ description: <<END
+The float value that the lowest quantized `y` value represents.
+END
+ }
+ in_arg {
+ name: "max_y"
+ description: <<END
+The float value that the highest quantized `y` value represents.
+END
+ }
+ out_arg {
+ name: "min_z"
+ description: <<END
+The float value that the lowest quantized output value represents.
+END
+ }
+ out_arg {
+ name: "max_z"
+ description: <<END
+The float value that the highest quantized output value represents.
+
+*NOTE*: `QuantizedAdd` supports limited forms of broadcasting. More about
+broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+ }
+ summary: "Returns x + y element-wise, working on quantized buffers."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_QuantizedAvgPool.pbtxt b/tensorflow/core/api_def/base_api/api_def_QuantizedAvgPool.pbtxt
new file mode 100644
index 0000000000..912ab54026
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_QuantizedAvgPool.pbtxt
@@ -0,0 +1,54 @@
+op {
+ graph_op_name: "QuantizedAvgPool"
+ in_arg {
+ name: "input"
+ description: <<END
+4-D with shape `[batch, height, width, channels]`.
+END
+ }
+ in_arg {
+ name: "min_input"
+ description: <<END
+The float value that the lowest quantized input value represents.
+END
+ }
+ in_arg {
+ name: "max_input"
+ description: <<END
+The float value that the highest quantized input value represents.
+END
+ }
+ out_arg {
+ name: "min_output"
+ description: <<END
+The float value that the lowest quantized output value represents.
+END
+ }
+ out_arg {
+ name: "max_output"
+ description: <<END
+The float value that the highest quantized output value represents.
+END
+ }
+ attr {
+ name: "ksize"
+ description: <<END
+The size of the window for each dimension of the input tensor.
+The length must be 4 to match the number of dimensions of the input.
+END
+ }
+ attr {
+ name: "strides"
+ description: <<END
+The stride of the sliding window for each dimension of the input
+tensor. The length must be 4 to match the number of dimensions of the input.
+END
+ }
+ attr {
+ name: "padding"
+ description: <<END
+The type of padding algorithm to use.
+END
+ }
+ summary: "Produces the average pool of the input tensor for quantized types."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_QuantizedBatchNormWithGlobalNormalization.pbtxt b/tensorflow/core/api_def/base_api/api_def_QuantizedBatchNormWithGlobalNormalization.pbtxt
new file mode 100644
index 0000000000..27990db1d0
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_QuantizedBatchNormWithGlobalNormalization.pbtxt
@@ -0,0 +1,118 @@
+op {
+ graph_op_name: "QuantizedBatchNormWithGlobalNormalization"
+ in_arg {
+ name: "t"
+ description: <<END
+A 4D input Tensor.
+END
+ }
+ in_arg {
+ name: "t_min"
+ description: <<END
+The value represented by the lowest quantized input.
+END
+ }
+ in_arg {
+ name: "t_max"
+ description: <<END
+The value represented by the highest quantized input.
+END
+ }
+ in_arg {
+ name: "m"
+ description: <<END
+A 1D mean Tensor with size matching the last dimension of t.
+This is the first output from tf.nn.moments,
+or a saved moving average thereof.
+END
+ }
+ in_arg {
+ name: "m_min"
+ description: <<END
+The value represented by the lowest quantized mean.
+END
+ }
+ in_arg {
+ name: "m_max"
+ description: <<END
+The value represented by the highest quantized mean.
+END
+ }
+ in_arg {
+ name: "v"
+ description: <<END
+A 1D variance Tensor with size matching the last dimension of t.
+This is the second output from tf.nn.moments,
+or a saved moving average thereof.
+END
+ }
+ in_arg {
+ name: "v_min"
+ description: <<END
+The value represented by the lowest quantized variance.
+END
+ }
+ in_arg {
+ name: "v_max"
+ description: <<END
+The value represented by the highest quantized variance.
+END
+ }
+ in_arg {
+ name: "beta"
+ description: <<END
+A 1D beta Tensor with size matching the last dimension of t.
+An offset to be added to the normalized tensor.
+END
+ }
+ in_arg {
+ name: "beta_min"
+ description: <<END
+The value represented by the lowest quantized offset.
+END
+ }
+ in_arg {
+ name: "beta_max"
+ description: <<END
+The value represented by the highest quantized offset.
+END
+ }
+ in_arg {
+ name: "gamma"
+ description: <<END
+A 1D gamma Tensor with size matching the last dimension of t.
+If "scale_after_normalization" is true, this tensor will be multiplied
+with the normalized tensor.
+END
+ }
+ in_arg {
+ name: "gamma_min"
+ description: <<END
+The value represented by the lowest quantized gamma.
+END
+ }
+ in_arg {
+ name: "gamma_max"
+ description: <<END
+The value represented by the highest quantized gamma.
+END
+ }
+ attr {
+ name: "variance_epsilon"
+ description: <<END
+A small float number to avoid dividing by 0.
+END
+ }
+ attr {
+ name: "scale_after_normalization"
+ description: <<END
+A bool indicating whether the resulting tensor
+needs to be multiplied by gamma.
+END
+ }
+ summary: "Quantized Batch normalization."
+ description: <<END
+This op is deprecated and will be removed in the future. Prefer
+`tf.nn.batch_normalization`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_QuantizedBiasAdd.pbtxt b/tensorflow/core/api_def/base_api/api_def_QuantizedBiasAdd.pbtxt
new file mode 100644
index 0000000000..1d714e3aa2
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_QuantizedBiasAdd.pbtxt
@@ -0,0 +1,49 @@
+op {
+ graph_op_name: "QuantizedBiasAdd"
+ in_arg {
+ name: "bias"
+ description: <<END
+A 1D bias Tensor with size matching the last dimension of 'input'.
+END
+ }
+ in_arg {
+ name: "min_input"
+ description: <<END
+The float value that the lowest quantized input value represents.
+END
+ }
+ in_arg {
+ name: "max_input"
+ description: <<END
+The float value that the highest quantized input value represents.
+END
+ }
+ in_arg {
+ name: "min_bias"
+ description: <<END
+The float value that the lowest quantized bias value represents.
+END
+ }
+ in_arg {
+ name: "max_bias"
+ description: <<END
+The float value that the highest quantized bias value represents.
+END
+ }
+ out_arg {
+ name: "min_out"
+ description: <<END
+The float value that the lowest quantized output value represents.
+END
+ }
+ out_arg {
+ name: "max_out"
+ description: <<END
+The float value that the highest quantized output value represents.
+END
+ }
+ summary: "Adds Tensor \'bias\' to Tensor \'input\' for Quantized types."
+ description: <<END
+Broadcasts the values of bias on dimensions 0..N-2 of 'input'.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_QuantizedConcat.pbtxt b/tensorflow/core/api_def/base_api/api_def_QuantizedConcat.pbtxt
new file mode 100644
index 0000000000..e39654fe90
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_QuantizedConcat.pbtxt
@@ -0,0 +1,50 @@
+op {
+ graph_op_name: "QuantizedConcat"
+ in_arg {
+ name: "concat_dim"
+ description: <<END
+0-D. The dimension along which to concatenate. Must be in the
+range [0, rank(values)).
+END
+ }
+ in_arg {
+ name: "values"
+ description: <<END
+The `N` Tensors to concatenate. Their ranks and types must match,
+and their sizes must match in all dimensions except `concat_dim`.
+END
+ }
+ in_arg {
+ name: "input_mins"
+ description: <<END
+The minimum scalar values for each of the input tensors.
+END
+ }
+ in_arg {
+ name: "input_maxes"
+ description: <<END
+The maximum scalar values for each of the input tensors.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+A `Tensor` with the concatenation of values stacked along the
+`concat_dim` dimension. This tensor's shape matches that of `values` except
+in `concat_dim` where it has the sum of the sizes.
+END
+ }
+ out_arg {
+ name: "output_min"
+ description: <<END
+The float value that the minimum quantized output value represents.
+END
+ }
+ out_arg {
+ name: "output_max"
+ description: <<END
+The float value that the maximum quantized output value represents.
+END
+ }
+ summary: "Concatenates quantized tensors along one dimension."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_QuantizedConv2D.pbtxt b/tensorflow/core/api_def/base_api/api_def_QuantizedConv2D.pbtxt
new file mode 100644
index 0000000000..b19bbeab12
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_QuantizedConv2D.pbtxt
@@ -0,0 +1,65 @@
+op {
+ graph_op_name: "QuantizedConv2D"
+ in_arg {
+ name: "filter"
+ description: <<END
+filter's input_depth dimension must match input's depth dimension.
+END
+ }
+ in_arg {
+ name: "min_input"
+ description: <<END
+The float value that the lowest quantized input value represents.
+END
+ }
+ in_arg {
+ name: "max_input"
+ description: <<END
+The float value that the highest quantized input value represents.
+END
+ }
+ in_arg {
+ name: "min_filter"
+ description: <<END
+The float value that the lowest quantized filter value represents.
+END
+ }
+ in_arg {
+ name: "max_filter"
+ description: <<END
+The float value that the highest quantized filter value represents.
+END
+ }
+ out_arg {
+ name: "min_output"
+ description: <<END
+The float value that the lowest quantized output value represents.
+END
+ }
+ out_arg {
+ name: "max_output"
+ description: <<END
+The float value that the highest quantized output value represents.
+END
+ }
+ attr {
+ name: "strides"
+ description: <<END
+The stride of the sliding window for each dimension of the input
+tensor.
+END
+ }
+ attr {
+ name: "padding"
+ description: <<END
+The type of padding algorithm to use.
+END
+ }
+ summary: "Computes a 2D convolution given quantized 4D input and filter tensors."
+ description: <<END
+The inputs are quantized tensors where the lowest value represents the real
+number given by the associated minimum, and the highest represents the maximum.
+This means that you can only interpret the quantized output in the same way, by
+taking the returned minimum and maximum values into account.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_QuantizedInstanceNorm.pbtxt b/tensorflow/core/api_def/base_api/api_def_QuantizedInstanceNorm.pbtxt
new file mode 100644
index 0000000000..7c30870fde
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_QuantizedInstanceNorm.pbtxt
@@ -0,0 +1,72 @@
+op {
+ graph_op_name: "QuantizedInstanceNorm"
+ in_arg {
+ name: "x"
+ description: <<END
+A 4D input Tensor.
+END
+ }
+ in_arg {
+ name: "x_min"
+ description: <<END
+The value represented by the lowest quantized input.
+END
+ }
+ in_arg {
+ name: "x_max"
+ description: <<END
+The value represented by the highest quantized input.
+END
+ }
+ out_arg {
+ name: "y"
+ description: <<END
+A 4D Tensor.
+END
+ }
+ out_arg {
+ name: "y_min"
+ description: <<END
+The value represented by the lowest quantized output.
+END
+ }
+ out_arg {
+ name: "y_max"
+ description: <<END
+The value represented by the highest quantized output.
+END
+ }
+ attr {
+ name: "output_range_given"
+ description: <<END
+If True, `given_y_min` and `given_y_max`
+are used as the output range. Otherwise,
+the implementation computes the output range.
+END
+ }
+ attr {
+ name: "given_y_min"
+ description: <<END
+The value to output in `y_min` if `output_range_given` is True.
+END
+ }
+ attr {
+ name: "given_y_max"
+ description: <<END
+The value to output in `y_max` if `output_range_given` is True.
+END
+ }
+ attr {
+ name: "variance_epsilon"
+ description: <<END
+A small float number to avoid dividing by 0.
+END
+ }
+ attr {
+ name: "min_separation"
+ description: <<END
+Minimum value of `y_max - y_min`.
+END
+ }
+ summary: "Quantized Instance normalization."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_QuantizedMatMul.pbtxt b/tensorflow/core/api_def/base_api/api_def_QuantizedMatMul.pbtxt
new file mode 100644
index 0000000000..d318208900
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_QuantizedMatMul.pbtxt
@@ -0,0 +1,77 @@
+op {
+ graph_op_name: "QuantizedMatMul"
+ in_arg {
+ name: "a"
+ description: <<END
+Must be a two-dimensional tensor.
+END
+ }
+ in_arg {
+ name: "b"
+ description: <<END
+Must be a two-dimensional tensor.
+END
+ }
+ in_arg {
+ name: "min_a"
+ description: <<END
+The float value that the lowest quantized `a` value represents.
+END
+ }
+ in_arg {
+ name: "max_a"
+ description: <<END
+The float value that the highest quantized `a` value represents.
+END
+ }
+ in_arg {
+ name: "min_b"
+ description: <<END
+The float value that the lowest quantized `b` value represents.
+END
+ }
+ in_arg {
+ name: "max_b"
+ description: <<END
+The float value that the highest quantized `b` value represents.
+END
+ }
+ out_arg {
+ name: "min_out"
+ description: <<END
+The float value that the lowest quantized output value represents.
+END
+ }
+ out_arg {
+ name: "max_out"
+ description: <<END
+The float value that the highest quantized output value represents.
+END
+ }
+ attr {
+ name: "transpose_a"
+ description: <<END
+If true, `a` is transposed before multiplication.
+END
+ }
+ attr {
+ name: "transpose_b"
+ description: <<END
+If true, `b` is transposed before multiplication.
+END
+ }
+ attr {
+ name: "Tactivation"
+ description: <<END
+The type of output produced by the activation function
+following this operation.
+END
+ }
+ summary: "Perform a quantized matrix multiplication of `a` by the matrix `b`."
+ description: <<END
+The inputs must be two-dimensional matrices and the inner dimension of
+`a` (after being transposed if `transpose_a` is non-zero) must match the
+outer dimension of `b` (after being transposed if `transpose_b` is
+non-zero).
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_QuantizedMaxPool.pbtxt b/tensorflow/core/api_def/base_api/api_def_QuantizedMaxPool.pbtxt
new file mode 100644
index 0000000000..208950754b
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_QuantizedMaxPool.pbtxt
@@ -0,0 +1,54 @@
+op {
+ graph_op_name: "QuantizedMaxPool"
+ in_arg {
+ name: "input"
+ description: <<END
+The 4D (batch x rows x cols x depth) Tensor to MaxReduce over.
+END
+ }
+ in_arg {
+ name: "min_input"
+ description: <<END
+The float value that the lowest quantized input value represents.
+END
+ }
+ in_arg {
+ name: "max_input"
+ description: <<END
+The float value that the highest quantized input value represents.
+END
+ }
+ out_arg {
+ name: "min_output"
+ description: <<END
+The float value that the lowest quantized output value represents.
+END
+ }
+ out_arg {
+ name: "max_output"
+ description: <<END
+The float value that the highest quantized output value represents.
+END
+ }
+ attr {
+ name: "ksize"
+ description: <<END
+The size of the window for each dimension of the input tensor.
+The length must be 4 to match the number of dimensions of the input.
+END
+ }
+ attr {
+ name: "strides"
+ description: <<END
+The stride of the sliding window for each dimension of the input
+tensor. The length must be 4 to match the number of dimensions of the input.
+END
+ }
+ attr {
+ name: "padding"
+ description: <<END
+The type of padding algorithm to use.
+END
+ }
+ summary: "Produces the max pool of the input tensor for quantized types."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_QuantizedMul.pbtxt b/tensorflow/core/api_def/base_api/api_def_QuantizedMul.pbtxt
new file mode 100644
index 0000000000..a6061204f3
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_QuantizedMul.pbtxt
@@ -0,0 +1,43 @@
+op {
+ graph_op_name: "QuantizedMul"
+ in_arg {
+ name: "min_x"
+ description: <<END
+The float value that the lowest quantized `x` value represents.
+END
+ }
+ in_arg {
+ name: "max_x"
+ description: <<END
+The float value that the highest quantized `x` value represents.
+END
+ }
+ in_arg {
+ name: "min_y"
+ description: <<END
+The float value that the lowest quantized `y` value represents.
+END
+ }
+ in_arg {
+ name: "max_y"
+ description: <<END
+The float value that the highest quantized `y` value represents.
+END
+ }
+ out_arg {
+ name: "min_z"
+ description: <<END
+The float value that the lowest quantized output value represents.
+END
+ }
+ out_arg {
+ name: "max_z"
+ description: <<END
+The float value that the highest quantized output value represents.
+
+*NOTE*: `QuantizedMul` supports limited forms of broadcasting. More about
+broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+ }
+ summary: "Returns x * y element-wise, working on quantized buffers."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_QuantizedRelu.pbtxt b/tensorflow/core/api_def/base_api/api_def_QuantizedRelu.pbtxt
new file mode 100644
index 0000000000..519fbf1806
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_QuantizedRelu.pbtxt
@@ -0,0 +1,34 @@
+op {
+ graph_op_name: "QuantizedRelu"
+ in_arg {
+ name: "min_features"
+ description: <<END
+The float value that the lowest quantized value represents.
+END
+ }
+ in_arg {
+ name: "max_features"
+ description: <<END
+The float value that the highest quantized value represents.
+END
+ }
+ out_arg {
+ name: "activations"
+ description: <<END
+Has the same output shape as "features".
+END
+ }
+ out_arg {
+ name: "min_activations"
+ description: <<END
+The float value that the lowest quantized value represents.
+END
+ }
+ out_arg {
+ name: "max_activations"
+ description: <<END
+The float value that the highest quantized value represents.
+END
+ }
+ summary: "Computes Quantized Rectified Linear: `max(features, 0)`"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_QuantizedRelu6.pbtxt b/tensorflow/core/api_def/base_api/api_def_QuantizedRelu6.pbtxt
new file mode 100644
index 0000000000..62fd01b4aa
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_QuantizedRelu6.pbtxt
@@ -0,0 +1,34 @@
+op {
+ graph_op_name: "QuantizedRelu6"
+ in_arg {
+ name: "min_features"
+ description: <<END
+The float value that the lowest quantized value represents.
+END
+ }
+ in_arg {
+ name: "max_features"
+ description: <<END
+The float value that the highest quantized value represents.
+END
+ }
+ out_arg {
+ name: "activations"
+ description: <<END
+Has the same output shape as "features".
+END
+ }
+ out_arg {
+ name: "min_activations"
+ description: <<END
+The float value that the lowest quantized value represents.
+END
+ }
+ out_arg {
+ name: "max_activations"
+ description: <<END
+The float value that the highest quantized value represents.
+END
+ }
+ summary: "Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)`"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_QuantizedReluX.pbtxt b/tensorflow/core/api_def/base_api/api_def_QuantizedReluX.pbtxt
new file mode 100644
index 0000000000..5763a19677
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_QuantizedReluX.pbtxt
@@ -0,0 +1,34 @@
+op {
+ graph_op_name: "QuantizedReluX"
+ in_arg {
+ name: "min_features"
+ description: <<END
+The float value that the lowest quantized value represents.
+END
+ }
+ in_arg {
+ name: "max_features"
+ description: <<END
+The float value that the highest quantized value represents.
+END
+ }
+ out_arg {
+ name: "activations"
+ description: <<END
+Has the same output shape as "features".
+END
+ }
+ out_arg {
+ name: "min_activations"
+ description: <<END
+The float value that the lowest quantized value represents.
+END
+ }
+ out_arg {
+ name: "max_activations"
+ description: <<END
+The float value that the highest quantized value represents.
+END
+ }
+ summary: "Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)`"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_QuantizedReshape.pbtxt b/tensorflow/core/api_def/base_api/api_def_QuantizedReshape.pbtxt
new file mode 100644
index 0000000000..b20333f8c7
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_QuantizedReshape.pbtxt
@@ -0,0 +1,37 @@
+op {
+ graph_op_name: "QuantizedReshape"
+ in_arg {
+ name: "shape"
+ description: <<END
+Defines the shape of the output tensor.
+END
+ }
+ in_arg {
+ name: "input_min"
+ description: <<END
+The minimum value of the input.
+END
+ }
+ in_arg {
+ name: "input_max"
+ description: <<END
+The maximum value of the input.
+END
+ }
+ out_arg {
+ name: "output_min"
+ description: <<END
+This value is copied from input_min.
+END
+ }
+ out_arg {
+ name: "output_max"
+ description: <<END
+This value is copied from input_max.
+END
+ }
+ summary: "Reshapes a quantized tensor as per the Reshape op."
+ description: <<END
+```
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_QuantizedResizeBilinear.pbtxt b/tensorflow/core/api_def/base_api/api_def_QuantizedResizeBilinear.pbtxt
new file mode 100644
index 0000000000..6b3ba72e53
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_QuantizedResizeBilinear.pbtxt
@@ -0,0 +1,35 @@
+op {
+ graph_op_name: "QuantizedResizeBilinear"
+ in_arg {
+ name: "images"
+ description: <<END
+4-D with shape `[batch, height, width, channels]`.
+END
+ }
+ in_arg {
+ name: "size"
+ description: <<END
+A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The
+new size for the images.
+END
+ }
+ out_arg {
+ name: "resized_images"
+ description: <<END
+4-D with shape
+`[batch, new_height, new_width, channels]`.
+END
+ }
+ attr {
+ name: "align_corners"
+ description: <<END
+If true, rescale input by (new_height - 1) / (height - 1), which
+exactly aligns the 4 corners of images and resized images. If false, rescale
+by new_height / height. The width dimension is treated similarly.
+END
+ }
+ summary: "Resize quantized `images` to `size` using quantized bilinear interpolation."
+ description: <<END
+Input images and output images must be quantized types.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_QueueClose.pbtxt b/tensorflow/core/api_def/base_api/api_def_QueueClose.pbtxt
new file mode 100644
index 0000000000..950425a853
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_QueueClose.pbtxt
@@ -0,0 +1,25 @@
+op {
+ graph_op_name: "QueueClose"
+ visibility: SKIP
+ in_arg {
+ name: "handle"
+ description: <<END
+The handle to a queue.
+END
+ }
+ attr {
+ name: "cancel_pending_enqueues"
+ description: <<END
+If true, all pending enqueue requests that are
+blocked on the given queue will be canceled.
+END
+ }
+ summary: "Closes the given queue."
+ description: <<END
+This operation signals that no more elements will be enqueued in the
+given queue. Subsequent Enqueue(Many) operations will fail.
+Subsequent Dequeue(Many) operations will continue to succeed if
+sufficient elements remain in the queue. Subsequent Dequeue(Many)
+operations that would block will fail immediately.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_QueueCloseV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_QueueCloseV2.pbtxt
new file mode 100644
index 0000000000..a5603269a0
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_QueueCloseV2.pbtxt
@@ -0,0 +1,27 @@
+op {
+ graph_op_name: "QueueCloseV2"
+ endpoint {
+ name: "QueueClose"
+ }
+ in_arg {
+ name: "handle"
+ description: <<END
+The handle to a queue.
+END
+ }
+ attr {
+ name: "cancel_pending_enqueues"
+ description: <<END
+If true, all pending enqueue requests that are
+blocked on the given queue will be canceled.
+END
+ }
+ summary: "Closes the given queue."
+ description: <<END
+This operation signals that no more elements will be enqueued in the
+given queue. Subsequent Enqueue(Many) operations will fail.
+Subsequent Dequeue(Many) operations will continue to succeed if
+sufficient elements remain in the queue. Subsequent Dequeue(Many)
+operations that would block will fail immediately.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_QueueDequeue.pbtxt b/tensorflow/core/api_def/base_api/api_def_QueueDequeue.pbtxt
new file mode 100644
index 0000000000..3290e10f0b
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_QueueDequeue.pbtxt
@@ -0,0 +1,39 @@
+op {
+ graph_op_name: "QueueDequeue"
+ visibility: SKIP
+ in_arg {
+ name: "handle"
+ description: <<END
+The handle to a queue.
+END
+ }
+ out_arg {
+ name: "components"
+ description: <<END
+One or more tensors that were dequeued as a tuple.
+END
+ }
+ attr {
+ name: "component_types"
+ description: <<END
+The type of each component in a tuple.
+END
+ }
+ attr {
+ name: "timeout_ms"
+ description: <<END
+If the queue is empty, this operation will block for up to
+timeout_ms milliseconds.
+Note: This option is not supported yet.
+END
+ }
+ summary: "Dequeues a tuple of one or more tensors from the given queue."
+ description: <<END
+This operation has k outputs, where k is the number of components
+in the tuples stored in the given queue, and output i is the ith
+component of the dequeued tuple.
+
+N.B. If the queue is empty, this operation will block until an element
+has been dequeued (or 'timeout_ms' elapses, if specified).
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_QueueDequeueMany.pbtxt b/tensorflow/core/api_def/base_api/api_def_QueueDequeueMany.pbtxt
new file mode 100644
index 0000000000..2247b37bb2
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_QueueDequeueMany.pbtxt
@@ -0,0 +1,52 @@
+op {
+ graph_op_name: "QueueDequeueMany"
+ visibility: SKIP
+ in_arg {
+ name: "handle"
+ description: <<END
+The handle to a queue.
+END
+ }
+ in_arg {
+ name: "n"
+ description: <<END
+The number of tuples to dequeue.
+END
+ }
+ out_arg {
+ name: "components"
+ description: <<END
+One or more tensors that were dequeued as a tuple.
+END
+ }
+ attr {
+ name: "component_types"
+ description: <<END
+The type of each component in a tuple.
+END
+ }
+ attr {
+ name: "timeout_ms"
+ description: <<END
+If the queue has fewer than n elements, this operation
+will block for up to timeout_ms milliseconds.
+Note: This option is not supported yet.
+END
+ }
+ summary: "Dequeues `n` tuples of one or more tensors from the given queue."
+ description: <<END
+If the queue is closed and there are fewer than `n` elements, then an
+OutOfRange error is returned.
+
+This operation concatenates queue-element component tensors along the
+0th dimension to make a single component tensor. All of the components
+in the dequeued tuple will have size `n` in the 0th dimension.
+
+This operation has `k` outputs, where `k` is the number of components in
+the tuples stored in the given queue, and output `i` is the ith
+component of the dequeued tuple.
+
+N.B. If the queue is empty, this operation will block until `n` elements
+have been dequeued (or 'timeout_ms' elapses, if specified).
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_QueueDequeueManyV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_QueueDequeueManyV2.pbtxt
new file mode 100644
index 0000000000..34a65c2944
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_QueueDequeueManyV2.pbtxt
@@ -0,0 +1,54 @@
+op {
+ graph_op_name: "QueueDequeueManyV2"
+ endpoint {
+ name: "QueueDequeueMany"
+ }
+ in_arg {
+ name: "handle"
+ description: <<END
+The handle to a queue.
+END
+ }
+ in_arg {
+ name: "n"
+ description: <<END
+The number of tuples to dequeue.
+END
+ }
+ out_arg {
+ name: "components"
+ description: <<END
+One or more tensors that were dequeued as a tuple.
+END
+ }
+ attr {
+ name: "component_types"
+ description: <<END
+The type of each component in a tuple.
+END
+ }
+ attr {
+ name: "timeout_ms"
+ description: <<END
+If the queue has fewer than n elements, this operation
+will block for up to timeout_ms milliseconds.
+Note: This option is not supported yet.
+END
+ }
+ summary: "Dequeues `n` tuples of one or more tensors from the given queue."
+ description: <<END
+If the queue is closed and there are fewer than `n` elements, then an
+OutOfRange error is returned.
+
+This operation concatenates queue-element component tensors along the
+0th dimension to make a single component tensor. All of the components
+in the dequeued tuple will have size `n` in the 0th dimension.
+
+This operation has `k` outputs, where `k` is the number of components in
+the tuples stored in the given queue, and output `i` is the ith
+component of the dequeued tuple.
+
+N.B. If the queue is empty, this operation will block until `n` elements
+have been dequeued (or 'timeout_ms' elapses, if specified).
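+
+A minimal TF 1.x sketch of the concatenation behavior (illustrative only):
+
+```python
+import tensorflow as tf
+
+# A queue whose elements are single float32 components of shape [2].
+q = tf.FIFOQueue(capacity=10, dtypes=[tf.float32], shapes=[[2]])
+enqueue = q.enqueue_many(tf.constant([[1., 2.], [3., 4.], [5., 6.]]))
+dequeue = q.dequeue_many(2)  # One tensor of shape [2, 2].
+
+with tf.Session() as sess:
+  sess.run(enqueue)
+  print(sess.run(dequeue))  # [[1. 2.] [3. 4.]]
+```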
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_QueueDequeueUpTo.pbtxt b/tensorflow/core/api_def/base_api/api_def_QueueDequeueUpTo.pbtxt
new file mode 100644
index 0000000000..a0c7c204aa
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_QueueDequeueUpTo.pbtxt
@@ -0,0 +1,56 @@
+op {
+ graph_op_name: "QueueDequeueUpTo"
+ visibility: SKIP
+ in_arg {
+ name: "handle"
+ description: <<END
+The handle to a queue.
+END
+ }
+ in_arg {
+ name: "n"
+ description: <<END
+The number of tuples to dequeue.
+END
+ }
+ out_arg {
+ name: "components"
+ description: <<END
+One or more tensors that were dequeued as a tuple.
+END
+ }
+ attr {
+ name: "component_types"
+ description: <<END
+The type of each component in a tuple.
+END
+ }
+ attr {
+ name: "timeout_ms"
+ description: <<END
+If the queue has fewer than n elements, this operation
+will block for up to timeout_ms milliseconds.
+Note: This option is not supported yet.
+END
+ }
+ summary: "Dequeues `n` tuples of one or more tensors from the given queue."
+ description: <<END
+This operation is not supported by all queues. If a queue does not support
+DequeueUpTo, then an Unimplemented error is returned.
+
+If the queue is closed and there are more than 0 but less than `n`
+elements remaining, then instead of returning an OutOfRange error like
+QueueDequeueMany, less than `n` elements are returned immediately. If
+the queue is closed and there are 0 elements left in the queue, then
+an OutOfRange error is returned just like in QueueDequeueMany.
+Otherwise the behavior is identical to QueueDequeueMany:
+
+This operation concatenates queue-element component tensors along the
+0th dimension to make a single component tensor. All of the components
+in the dequeued tuple will have size `n` in the 0th dimension.
+
+This operation has `k` outputs, where `k` is the number of components in
+the tuples stored in the given queue, and output `i` is the ith
+component of the dequeued tuple.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_QueueDequeueUpToV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_QueueDequeueUpToV2.pbtxt
new file mode 100644
index 0000000000..003e5f2c75
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_QueueDequeueUpToV2.pbtxt
@@ -0,0 +1,58 @@
+op {
+ graph_op_name: "QueueDequeueUpToV2"
+ endpoint {
+ name: "QueueDequeueUpTo"
+ }
+ in_arg {
+ name: "handle"
+ description: <<END
+The handle to a queue.
+END
+ }
+ in_arg {
+ name: "n"
+ description: <<END
+The number of tuples to dequeue.
+END
+ }
+ out_arg {
+ name: "components"
+ description: <<END
+One or more tensors that were dequeued as a tuple.
+END
+ }
+ attr {
+ name: "component_types"
+ description: <<END
+The type of each component in a tuple.
+END
+ }
+ attr {
+ name: "timeout_ms"
+ description: <<END
+If the queue has fewer than n elements, this operation
+will block for up to timeout_ms milliseconds.
+Note: This option is not supported yet.
+END
+ }
+ summary: "Dequeues `n` tuples of one or more tensors from the given queue."
+ description: <<END
+This operation is not supported by all queues. If a queue does not support
+DequeueUpTo, then an Unimplemented error is returned.
+
+If the queue is closed and there are more than 0 but less than `n`
+elements remaining, then instead of returning an OutOfRange error like
+QueueDequeueMany, less than `n` elements are returned immediately. If
+the queue is closed and there are 0 elements left in the queue, then
+an OutOfRange error is returned just like in QueueDequeueMany.
+Otherwise the behavior is identical to QueueDequeueMany:
+
+This operation concatenates queue-element component tensors along the
+0th dimension to make a single component tensor. All of the components
+in the dequeued tuple will have size `n` in the 0th dimension.
+
+This operation has `k` outputs, where `k` is the number of components in
+the tuples stored in the given queue, and output `i` is the ith
+component of the dequeued tuple.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_QueueDequeueV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_QueueDequeueV2.pbtxt
new file mode 100644
index 0000000000..fda760cfe5
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_QueueDequeueV2.pbtxt
@@ -0,0 +1,41 @@
+op {
+ graph_op_name: "QueueDequeueV2"
+ endpoint {
+ name: "QueueDequeue"
+ }
+ in_arg {
+ name: "handle"
+ description: <<END
+The handle to a queue.
+END
+ }
+ out_arg {
+ name: "components"
+ description: <<END
+One or more tensors that were dequeued as a tuple.
+END
+ }
+ attr {
+ name: "component_types"
+ description: <<END
+The type of each component in a tuple.
+END
+ }
+ attr {
+ name: "timeout_ms"
+ description: <<END
+If the queue is empty, this operation will block for up to
+timeout_ms milliseconds.
+Note: This option is not supported yet.
+END
+ }
+ summary: "Dequeues a tuple of one or more tensors from the given queue."
+ description: <<END
+This operation has k outputs, where k is the number of components
+in the tuples stored in the given queue, and output i is the ith
+component of the dequeued tuple.
+
+N.B. If the queue is empty, this operation will block until an element
+has been dequeued (or 'timeout_ms' elapses, if specified).
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_QueueEnqueue.pbtxt b/tensorflow/core/api_def/base_api/api_def_QueueEnqueue.pbtxt
new file mode 100644
index 0000000000..76477b51da
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_QueueEnqueue.pbtxt
@@ -0,0 +1,32 @@
+op {
+ graph_op_name: "QueueEnqueue"
+ visibility: SKIP
+ in_arg {
+ name: "handle"
+ description: <<END
+The handle to a queue.
+END
+ }
+ in_arg {
+ name: "components"
+ description: <<END
+One or more tensors from which the enqueued tensors should be taken.
+END
+ }
+ attr {
+ name: "timeout_ms"
+ description: <<END
+If the queue is full, this operation will block for up to
+timeout_ms milliseconds.
+Note: This option is not supported yet.
+END
+ }
+ summary: "Enqueues a tuple of one or more tensors in the given queue."
+ description: <<END
+The components input has k elements, which correspond to the components of
+tuples stored in the given queue.
+
+N.B. If the queue is full, this operation will block until the given
+element has been enqueued (or 'timeout_ms' elapses, if specified).
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_QueueEnqueueMany.pbtxt b/tensorflow/core/api_def/base_api/api_def_QueueEnqueueMany.pbtxt
new file mode 100644
index 0000000000..cbd282d9b9
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_QueueEnqueueMany.pbtxt
@@ -0,0 +1,37 @@
+op {
+ graph_op_name: "QueueEnqueueMany"
+ visibility: SKIP
+ in_arg {
+ name: "handle"
+ description: <<END
+The handle to a queue.
+END
+ }
+ in_arg {
+ name: "components"
+ description: <<END
+One or more tensors from which the enqueued tensors should
+be taken.
+END
+ }
+ attr {
+ name: "timeout_ms"
+ description: <<END
+If the queue is too full, this operation will block for up
+to timeout_ms milliseconds.
+Note: This option is not supported yet.
+END
+ }
+ summary: "Enqueues zero or more tuples of one or more tensors in the given queue."
+ description: <<END
+This operation slices each component tensor along the 0th dimension to
+make multiple queue elements. All of the tuple components must have the
+same size in the 0th dimension.
+
+The `components` input has `k` elements, which correspond to the components of
+tuples stored in the given queue.
+
+N.B. If the queue is full, this operation will block until the given
+elements have been enqueued (or 'timeout_ms' elapses, if specified).
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_QueueEnqueueManyV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_QueueEnqueueManyV2.pbtxt
new file mode 100644
index 0000000000..4c721caa25
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_QueueEnqueueManyV2.pbtxt
@@ -0,0 +1,39 @@
+op {
+ graph_op_name: "QueueEnqueueManyV2"
+ endpoint {
+ name: "QueueEnqueueMany"
+ }
+ in_arg {
+ name: "handle"
+ description: <<END
+The handle to a queue.
+END
+ }
+ in_arg {
+ name: "components"
+ description: <<END
+One or more tensors from which the enqueued tensors should
+be taken.
+END
+ }
+ attr {
+ name: "timeout_ms"
+ description: <<END
+If the queue is too full, this operation will block for up
+to timeout_ms milliseconds.
+Note: This option is not supported yet.
+END
+ }
+ summary: "Enqueues zero or more tuples of one or more tensors in the given queue."
+ description: <<END
+This operation slices each component tensor along the 0th dimension to
+make multiple queue elements. All of the tuple components must have the
+same size in the 0th dimension.
+
+The `components` input has `k` elements, which correspond to the components of
+tuples stored in the given queue.
+
+N.B. If the queue is full, this operation will block until the given
+elements have been enqueued (or 'timeout_ms' elapses, if specified).
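+
+For example, a minimal sketch assuming the TensorFlow 1.x wrapper
+`tf.FIFOQueue.enqueue_many`, which is backed by this op:
+
+```python
+import tensorflow as tf
+
+q = tf.FIFOQueue(capacity=10, dtypes=[tf.int32], shapes=[[]])
+# The [3] component tensor is sliced along dimension 0 into
+# three scalar queue elements.
+enqueue = q.enqueue_many([[10, 20, 30]])
+
+with tf.Session() as sess:
+    sess.run(enqueue)
+    print(sess.run(q.size()))     # ==> 3
+    print(sess.run(q.dequeue()))  # ==> 10
+```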
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_QueueEnqueueV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_QueueEnqueueV2.pbtxt
new file mode 100644
index 0000000000..367d197cb0
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_QueueEnqueueV2.pbtxt
@@ -0,0 +1,34 @@
+op {
+ graph_op_name: "QueueEnqueueV2"
+ endpoint {
+ name: "QueueEnqueue"
+ }
+ in_arg {
+ name: "handle"
+ description: <<END
+The handle to a queue.
+END
+ }
+ in_arg {
+ name: "components"
+ description: <<END
+One or more tensors from which the enqueued tensors should be taken.
+END
+ }
+ attr {
+ name: "timeout_ms"
+ description: <<END
+If the queue is full, this operation will block for up to
+timeout_ms milliseconds.
+Note: This option is not supported yet.
+END
+ }
+ summary: "Enqueues a tuple of one or more tensors in the given queue."
+ description: <<END
+The `components` input has `k` elements, which correspond to the components of
+tuples stored in the given queue.
+
+N.B. If the queue is full, this operation will block until the given
+element has been enqueued (or 'timeout_ms' elapses, if specified).
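+
+A minimal sketch, assuming the TensorFlow 1.x wrapper `tf.FIFOQueue.enqueue`
+for this op:
+
+```python
+import tensorflow as tf
+
+# A two-component queue, so the components input has k = 2 elements.
+q = tf.FIFOQueue(capacity=10, dtypes=[tf.int32, tf.string], shapes=[[], []])
+enqueue = q.enqueue([7, "seven"])
+
+with tf.Session() as sess:
+    sess.run(enqueue)
+    print(sess.run(q.dequeue()))  # ==> [7, b'seven']
+```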
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_QueueIsClosed.pbtxt b/tensorflow/core/api_def/base_api/api_def_QueueIsClosed.pbtxt
new file mode 100644
index 0000000000..9412b2e6d6
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_QueueIsClosed.pbtxt
@@ -0,0 +1,14 @@
+op {
+ graph_op_name: "QueueIsClosed"
+ in_arg {
+ name: "handle"
+ description: <<END
+The handle to a queue.
+END
+ }
+ summary: "Returns true if queue is closed."
+ description: <<END
+This operation returns true if the queue is closed and false if the queue
+is open.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_QueueIsClosedV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_QueueIsClosedV2.pbtxt
new file mode 100644
index 0000000000..45aa4d10fb
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_QueueIsClosedV2.pbtxt
@@ -0,0 +1,14 @@
+op {
+ graph_op_name: "QueueIsClosedV2"
+ in_arg {
+ name: "handle"
+ description: <<END
+The handle to a queue.
+END
+ }
+ summary: "Returns true if queue is closed."
+ description: <<END
+This operation returns true if the queue is closed and false if the queue
+is open.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_QueueSize.pbtxt b/tensorflow/core/api_def/base_api/api_def_QueueSize.pbtxt
new file mode 100644
index 0000000000..74fd38c0ee
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_QueueSize.pbtxt
@@ -0,0 +1,17 @@
+op {
+ graph_op_name: "QueueSize"
+ visibility: SKIP
+ in_arg {
+ name: "handle"
+ description: <<END
+The handle to a queue.
+END
+ }
+ out_arg {
+ name: "size"
+ description: <<END
+The number of elements in the given queue.
+END
+ }
+ summary: "Computes the number of elements in the given queue."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_QueueSizeV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_QueueSizeV2.pbtxt
new file mode 100644
index 0000000000..f0cfa40f65
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_QueueSizeV2.pbtxt
@@ -0,0 +1,19 @@
+op {
+ graph_op_name: "QueueSizeV2"
+ endpoint {
+ name: "QueueSize"
+ }
+ in_arg {
+ name: "handle"
+ description: <<END
+The handle to a queue.
+END
+ }
+ out_arg {
+ name: "size"
+ description: <<END
+The number of elements in the given queue.
+END
+ }
+ summary: "Computes the number of elements in the given queue."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_R.pbtxt b/tensorflow/core/api_def/base_api/api_def_R.pbtxt
deleted file mode 100644
index 4c398c9771..0000000000
--- a/tensorflow/core/api_def/base_api/api_def_R.pbtxt
+++ /dev/null
@@ -1,1392 +0,0 @@
-op {
- graph_op_name: "RFFT"
- endpoint {
- name: "RFFT"
- }
- summary: "Real-valued fast Fourier transform."
- description: <<END
-Computes the 1-dimensional discrete Fourier transform of a real-valued signal
-over the inner-most dimension of `input`.
-
-Since the DFT of a real signal is Hermitian-symmetric, `RFFT` only returns the
-`fft_length / 2 + 1` unique components of the FFT: the zero-frequency term,
-followed by the `fft_length / 2` positive-frequency terms.
-
-Along the axis `RFFT` is computed on, if `fft_length` is smaller than the
-corresponding dimension of `input`, the dimension is cropped. If it is larger,
-the dimension is padded with zeros.
-END
-}
-op {
- graph_op_name: "RFFT2D"
- endpoint {
- name: "RFFT2D"
- }
- summary: "2D real-valued fast Fourier transform."
- description: <<END
-Computes the 2-dimensional discrete Fourier transform of a real-valued signal
-over the inner-most 2 dimensions of `input`.
-
-Since the DFT of a real signal is Hermitian-symmetric, `RFFT2D` only returns the
-`fft_length / 2 + 1` unique components of the FFT for the inner-most dimension
-of `output`: the zero-frequency term, followed by the `fft_length / 2`
-positive-frequency terms.
-
-Along each axis `RFFT2D` is computed on, if `fft_length` is smaller than the
-corresponding dimension of `input`, the dimension is cropped. If it is larger,
-the dimension is padded with zeros.
-END
-}
-op {
- graph_op_name: "RFFT3D"
- endpoint {
- name: "RFFT3D"
- }
- summary: "3D real-valued fast Fourier transform."
- description: <<END
-Computes the 3-dimensional discrete Fourier transform of a real-valued signal
-over the inner-most 3 dimensions of `input`.
-
-Since the DFT of a real signal is Hermitian-symmetric, `RFFT3D` only returns the
-`fft_length / 2 + 1` unique components of the FFT for the inner-most dimension
-of `output`: the zero-frequency term, followed by the `fft_length / 2`
-positive-frequency terms.
-
-Along each axis `RFFT3D` is computed on, if `fft_length` is smaller than the
-corresponding dimension of `input`, the dimension is cropped. If it is larger,
-the dimension is padded with zeros.
-END
-}
-op {
- graph_op_name: "RGBToHSV"
- endpoint {
- name: "RGBToHSV"
- }
- summary: "Converts one or more images from RGB to HSV."
- description: <<END
-Outputs a tensor of the same shape as the `images` tensor, containing the HSV
-value of the pixels. The output is only well defined if the value in `images`
-are in `[0,1]`.
-
-`output[..., 0]` contains hue, `output[..., 1]` contains saturation, and
-`output[..., 2]` contains value. All HSV values are in `[0,1]`. A hue of 0
-corresponds to pure red, hue 1/3 is pure green, and 2/3 is pure blue.
-END
-}
-op {
- graph_op_name: "RandomCrop"
- endpoint {
- name: "RandomCrop"
- }
- summary: "Randomly crop `image`."
- description: <<END
-`size` is a 1-D int64 tensor with 2 elements representing the crop height and
-width. The values must be non negative.
-
-This Op picks a random location in `image` and crops a `height` by `width`
-rectangle from that location. The random location is picked so the cropped
-area will fit inside the original image.
-END
-}
-op {
- graph_op_name: "RandomGamma"
- endpoint {
- name: "RandomGamma"
- }
- summary: "Outputs random values from the Gamma distribution(s) described by alpha."
- description: <<END
-This op uses the algorithm by Marsaglia et al. to acquire samples via
-transformation-rejection from pairs of uniform and normal random variables.
-See http://dl.acm.org/citation.cfm?id=358414
-END
-}
-op {
- graph_op_name: "RandomPoisson"
- endpoint {
- name: "RandomPoisson"
- }
- summary: "Outputs random values from the Poisson distribution(s) described by rate."
- description: <<END
-This op uses two algorithms, depending on rate. If rate >= 10, then
-the algorithm by Hormann is used to acquire samples via
-transformation-rejection.
-See http://www.sciencedirect.com/science/article/pii/0167668793909974.
-
-Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform
-random variables.
-See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer
-Programming, Volume 2. Addison Wesley
-END
-}
-op {
- graph_op_name: "RandomPoissonV2"
- endpoint {
- name: "RandomPoissonV2"
- }
- summary: "Outputs random values from the Poisson distribution(s) described by rate."
- description: <<END
-This op uses two algorithms, depending on rate. If rate >= 10, then
-the algorithm by Hormann is used to acquire samples via
-transformation-rejection.
-See http://www.sciencedirect.com/science/article/pii/0167668793909974.
-
-Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform
-random variables.
-See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer
-Programming, Volume 2. Addison Wesley
-END
-}
-op {
- graph_op_name: "RandomShuffle"
- endpoint {
- name: "RandomShuffle"
- }
- summary: "Randomly shuffles a tensor along its first dimension."
- description: <<END
- The tensor is shuffled along dimension 0, such that each `value[j]` is mapped
- to one and only one `output[i]`. For example, a mapping that might occur for a
- 3x2 tensor is:
-
-```
-[[1, 2], [[5, 6],
- [3, 4], ==> [1, 2],
- [5, 6]] [3, 4]]
-```
-END
-}
-op {
- graph_op_name: "RandomShuffleQueue"
- endpoint {
- name: "RandomShuffleQueue"
- }
- summary: "A queue that randomizes the order of elements."
-}
-op {
- graph_op_name: "RandomShuffleQueueV2"
- endpoint {
- name: "RandomShuffleQueueV2"
- }
- summary: "A queue that randomizes the order of elements."
-}
-op {
- graph_op_name: "RandomStandardNormal"
- endpoint {
- name: "RandomStandardNormal"
- }
- summary: "Outputs random values from a normal distribution."
- description: <<END
-The generated values will have mean 0 and standard deviation 1.
-END
-}
-op {
- graph_op_name: "RandomUniform"
- endpoint {
- name: "RandomUniform"
- }
- summary: "Outputs random values from a uniform distribution."
- description: <<END
-The generated values follow a uniform distribution in the range `[0, 1)`. The
-lower bound 0 is included in the range, while the upper bound 1 is excluded.
-END
-}
-op {
- graph_op_name: "RandomUniformInt"
- endpoint {
- name: "RandomUniformInt"
- }
- summary: "Outputs random integers from a uniform distribution."
- description: <<END
-The generated values are uniform integers in the range `[minval, maxval)`.
-The lower bound `minval` is included in the range, while the upper bound
-`maxval` is excluded.
-
-The random integers are slightly biased unless `maxval - minval` is an exact
-power of two. The bias is small for values of `maxval - minval` significantly
-smaller than the range of the output (either `2^32` or `2^64`).
-END
-}
-op {
- graph_op_name: "Range"
- endpoint {
- name: "Range"
- }
- summary: "Creates a sequence of numbers."
- description: <<END
-This operation creates a sequence of numbers that begins at `start` and
-extends by increments of `delta` up to but not including `limit`.
-
-For example:
-
-```
-# 'start' is 3
-# 'limit' is 18
-# 'delta' is 3
-tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
-```
-END
-}
-op {
- graph_op_name: "RangeDataset"
- endpoint {
- name: "RangeDataset"
- }
- summary: "Creates a dataset with a range of values. Corresponds to python\'s xrange."
-}
-op {
- graph_op_name: "Rank"
- endpoint {
- name: "Rank"
- }
- summary: "Returns the rank of a tensor."
- description: <<END
-This operation returns an integer representing the rank of `input`.
-
-For example:
-
-```
-# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
-# shape of tensor 't' is [2, 2, 3]
-rank(t) ==> 3
-```
-
-**Note**: The rank of a tensor is not the same as the rank of a matrix. The rank
-of a tensor is the number of indices required to uniquely select each element
-of the tensor. Rank is also known as "order", "degree", or "ndims."
-END
-}
-op {
- graph_op_name: "ReadFile"
- endpoint {
- name: "ReadFile"
- }
- summary: "Reads and outputs the entire contents of the input filename."
-}
-op {
- graph_op_name: "ReaderNumRecordsProduced"
- endpoint {
- name: "ReaderNumRecordsProduced"
- }
- summary: "Returns the number of records this Reader has produced."
- description: <<END
-This is the same as the number of ReaderRead executions that have
-succeeded.
-END
-}
-op {
- graph_op_name: "ReaderNumRecordsProducedV2"
- endpoint {
- name: "ReaderNumRecordsProducedV2"
- }
- summary: "Returns the number of records this Reader has produced."
- description: <<END
-This is the same as the number of ReaderRead executions that have
-succeeded.
-END
-}
-op {
- graph_op_name: "ReaderNumWorkUnitsCompleted"
- endpoint {
- name: "ReaderNumWorkUnitsCompleted"
- }
- summary: "Returns the number of work units this Reader has finished processing."
-}
-op {
- graph_op_name: "ReaderNumWorkUnitsCompletedV2"
- endpoint {
- name: "ReaderNumWorkUnitsCompletedV2"
- }
- summary: "Returns the number of work units this Reader has finished processing."
-}
-op {
- graph_op_name: "ReaderRead"
- endpoint {
- name: "ReaderRead"
- }
- summary: "Returns the next record (key, value pair) produced by a Reader."
- description: <<END
-Will dequeue from the input queue if necessary (e.g. when the
-Reader needs to start reading from a new file since it has finished
-with the previous file).
-END
-}
-op {
- graph_op_name: "ReaderReadUpTo"
- endpoint {
- name: "ReaderReadUpTo"
- }
- summary: "Returns up to `num_records` (key, value) pairs produced by a Reader."
- description: <<END
-Will dequeue from the input queue if necessary (e.g. when the
-Reader needs to start reading from a new file since it has finished
-with the previous file).
-It may return less than `num_records` even before the last batch.
-END
-}
-op {
- graph_op_name: "ReaderReadUpToV2"
- endpoint {
- name: "ReaderReadUpToV2"
- }
- summary: "Returns up to `num_records` (key, value) pairs produced by a Reader."
- description: <<END
-Will dequeue from the input queue if necessary (e.g. when the
-Reader needs to start reading from a new file since it has finished
-with the previous file).
-It may return less than `num_records` even before the last batch.
-END
-}
-op {
- graph_op_name: "ReaderReadV2"
- endpoint {
- name: "ReaderReadV2"
- }
- summary: "Returns the next record (key, value pair) produced by a Reader."
- description: <<END
-Will dequeue from the input queue if necessary (e.g. when the
-Reader needs to start reading from a new file since it has finished
-with the previous file).
-END
-}
-op {
- graph_op_name: "ReaderReset"
- endpoint {
- name: "ReaderReset"
- }
- summary: "Restore a Reader to its initial clean state."
-}
-op {
- graph_op_name: "ReaderResetV2"
- endpoint {
- name: "ReaderResetV2"
- }
- summary: "Restore a Reader to its initial clean state."
-}
-op {
- graph_op_name: "ReaderRestoreState"
- endpoint {
- name: "ReaderRestoreState"
- }
- summary: "Restore a reader to a previously saved state."
- description: <<END
-Not all Readers support being restored, so this can produce an
-Unimplemented error.
-END
-}
-op {
- graph_op_name: "ReaderRestoreStateV2"
- endpoint {
- name: "ReaderRestoreStateV2"
- }
- summary: "Restore a reader to a previously saved state."
- description: <<END
-Not all Readers support being restored, so this can produce an
-Unimplemented error.
-END
-}
-op {
- graph_op_name: "ReaderSerializeState"
- endpoint {
- name: "ReaderSerializeState"
- }
- summary: "Produce a string tensor that encodes the state of a Reader."
- description: <<END
-Not all Readers support being serialized, so this can produce an
-Unimplemented error.
-END
-}
-op {
- graph_op_name: "ReaderSerializeStateV2"
- endpoint {
- name: "ReaderSerializeStateV2"
- }
- summary: "Produce a string tensor that encodes the state of a Reader."
- description: <<END
-Not all Readers support being serialized, so this can produce an
-Unimplemented error.
-END
-}
-op {
- graph_op_name: "Real"
- endpoint {
- name: "Real"
- }
- summary: "Returns the real part of a complex number."
- description: <<END
-Given a tensor `input` of complex numbers, this operation returns a tensor of
-type `float` that is the real part of each element in `input`. All elements in
-`input` must be complex numbers of the form \\(a + bj\\), where *a* is the real
- part returned by this operation and *b* is the imaginary part.
-
-For example:
-
-```
-# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
-tf.real(input) ==> [-2.25, 3.25]
-```
-END
-}
-op {
- graph_op_name: "RealDiv"
- endpoint {
- name: "RealDiv"
- }
- summary: "Returns x / y element-wise for real types."
- description: <<END
-If `x` and `y` are reals, this will return the floating-point division.
-
-*NOTE*: `Div` supports broadcasting. More about broadcasting
-[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-END
-}
-op {
- graph_op_name: "Reciprocal"
- endpoint {
- name: "Reciprocal"
- }
- summary: "Computes the reciprocal of x element-wise."
- description: <<END
-I.e., \\(y = 1 / x\\).
-END
-}
-op {
- graph_op_name: "ReciprocalGrad"
- endpoint {
- name: "ReciprocalGrad"
- }
- summary: "Computes the gradient for the inverse of `x` wrt its input."
- description: <<END
-Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy`
-is the corresponding input gradient.
-END
-}
-op {
- graph_op_name: "RecordInput"
- endpoint {
- name: "RecordInput"
- }
- summary: "Emits randomized records."
-}
-op {
- graph_op_name: "ReduceJoin"
- endpoint {
- name: "ReduceJoin"
- }
- summary: "Joins a string Tensor across the given dimensions."
- description: <<END
-Computes the string join across dimensions in the given string Tensor of shape
-`[d_0, d_1, ..., d_n-1]`. Returns a new Tensor created by joining the input
-strings with the given separator (default: empty string). Negative indices are
-counted backwards from the end, with `-1` being equivalent to `n - 1`.
-
-For example:
-
-```python
-# tensor `a` is [["a", "b"], ["c", "d"]]
-tf.reduce_join(a, 0) ==> ["ac", "bd"]
-tf.reduce_join(a, 1) ==> ["ab", "cd"]
-tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> ["ac", "bd"]
-tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> ["ab", "cd"]
-tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]]
-tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], ["cd"]]
-tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"]
-tf.reduce_join(a, [0, 1]) ==> ["acbd"]
-tf.reduce_join(a, [1, 0]) ==> ["abcd"]
-tf.reduce_join(a, []) ==> ["abcd"]
-```
-END
-}
-op {
- graph_op_name: "RefEnter"
- endpoint {
- name: "RefEnter"
- }
- summary: "Creates or finds a child frame, and makes `data` available to the child frame."
- description: <<END
-The unique `frame_name` is used by the `Executor` to identify frames. If
-`is_constant` is true, `output` is a constant in the child frame; otherwise
-it may be changed in the child frame. At most `parallel_iterations` iterations
-are run in parallel in the child frame.
-END
-}
-op {
- graph_op_name: "RefExit"
- endpoint {
- name: "RefExit"
- }
- summary: "Exits the current frame to its parent frame."
- description: <<END
-Exit makes its input `data` available to the parent frame.
-END
-}
-op {
- graph_op_name: "RefIdentity"
- endpoint {
- name: "RefIdentity"
- }
- summary: "Return the same ref tensor as the input ref tensor."
-}
-op {
- graph_op_name: "RefMerge"
- endpoint {
- name: "RefMerge"
- }
- summary: "Forwards the value of an available tensor from `inputs` to `output`."
- description: <<END
-`Merge` waits for at least one of the tensors in `inputs` to become available.
-It is usually combined with `Switch` to implement branching.
-
-`Merge` forwards the first tensor to become available to `output`, and sets
-`value_index` to its index in `inputs`.
-END
-}
-op {
- graph_op_name: "RefNextIteration"
- endpoint {
- name: "RefNextIteration"
- }
- summary: "Makes its input available to the next iteration."
-}
-op {
- graph_op_name: "RefSelect"
- endpoint {
- name: "RefSelect"
- }
- summary: "Forwards the `index`th element of `inputs` to `output`."
-}
-op {
- graph_op_name: "RefSwitch"
- endpoint {
- name: "RefSwitch"
- }
- summary: "Forwards the ref tensor `data` to the output port determined by `pred`."
- description: <<END
-If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise,
-the data goes to `output_false`.
-
-See also `Switch` and `Merge`.
-END
-}
-op {
- graph_op_name: "Relu"
- endpoint {
- name: "Relu"
- }
- summary: "Computes rectified linear: `max(features, 0)`."
-}
-op {
- graph_op_name: "Relu6"
- endpoint {
- name: "Relu6"
- }
- summary: "Computes rectified linear 6: `min(max(features, 0), 6)`."
-}
-op {
- graph_op_name: "Relu6Grad"
- endpoint {
- name: "Relu6Grad"
- }
- summary: "Computes rectified linear 6 gradients for a Relu6 operation."
-}
-op {
- graph_op_name: "ReluGrad"
- endpoint {
- name: "ReluGrad"
- }
- summary: "Computes rectified linear gradients for a Relu operation."
-}
-op {
- graph_op_name: "RemoteCall"
- endpoint {
- name: "RemoteCall"
- }
- summary: "Runs function `f` on a remote device indicated by `target`."
-}
-op {
- graph_op_name: "RemoteFusedGraphExecute"
- endpoint {
- name: "RemoteFusedGraphExecute"
- }
- summary: "Execute a sub graph on a remote processor."
- description: <<END
-The graph specifications(such as graph itself, input tensors and output names)
-are stored as a serialized protocol buffer of RemoteFusedGraphExecuteInfo
-as serialized_remote_fused_graph_execute_info.
-The specifications will be passed to a dedicated registered
-remote fused graph executor. The executor will send the graph specifications
-to a remote processor and execute that graph. The execution results
-will be passed to consumer nodes as outputs of this node.
-END
-}
-op {
- graph_op_name: "RepeatDataset"
- endpoint {
- name: "RepeatDataset"
- }
- summary: "Creates a dataset that emits the outputs of `input_dataset` `count` times."
-}
-op {
- graph_op_name: "RequantizationRange"
- endpoint {
- name: "RequantizationRange"
- }
- summary: "Given a quantized tensor described by (input, input_min, input_max), outputs a"
- description: <<END
-range that covers the actual values present in that tensor. This op is
-typically used to produce the requested_output_min and requested_output_max for
-Requantize.
-END
-}
-op {
- graph_op_name: "Requantize"
- endpoint {
- name: "Requantize"
- }
- summary: "Convert the quantized \'input\' tensor into a lower-precision \'output\', using the"
- description: <<END
-output range specified with 'requested_output_min' and 'requested_output_max'.
-
-[input_min, input_max] are scalar floats that specify the range for the float
-interpretation of the 'input' data. For example, if input_min is -1.0f and
-input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0
-value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.
-END
-}
-op {
- graph_op_name: "Reshape"
- endpoint {
- name: "Reshape"
- }
- summary: "Reshapes a tensor."
- description: <<END
-Given `tensor`, this operation returns a tensor that has the same values
-as `tensor` with shape `shape`.
-
-If one component of `shape` is the special value -1, the size of that dimension
-is computed so that the total size remains constant. In particular, a `shape`
-of `[-1]` flattens into 1-D. At most one component of `shape` can be -1.
-
-If `shape` is 1-D or higher, then the operation returns a tensor with shape
-`shape` filled with the values of `tensor`. In this case, the number of elements
-implied by `shape` must be the same as the number of elements in `tensor`.
-
-For example:
-
-```
-# tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9]
-# tensor 't' has shape [9]
-reshape(t, [3, 3]) ==> [[1, 2, 3],
- [4, 5, 6],
- [7, 8, 9]]
-
-# tensor 't' is [[[1, 1], [2, 2]],
-# [[3, 3], [4, 4]]]
-# tensor 't' has shape [2, 2, 2]
-reshape(t, [2, 4]) ==> [[1, 1, 2, 2],
- [3, 3, 4, 4]]
-
-# tensor 't' is [[[1, 1, 1],
-# [2, 2, 2]],
-# [[3, 3, 3],
-# [4, 4, 4]],
-# [[5, 5, 5],
-# [6, 6, 6]]]
-# tensor 't' has shape [3, 2, 3]
-# pass '[-1]' to flatten 't'
-reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]
-
-# -1 can also be used to infer the shape
-
-# -1 is inferred to be 9:
-reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
- [4, 4, 4, 5, 5, 5, 6, 6, 6]]
-# -1 is inferred to be 2:
-reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
- [4, 4, 4, 5, 5, 5, 6, 6, 6]]
-# -1 is inferred to be 3:
-reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1],
- [2, 2, 2],
- [3, 3, 3]],
- [[4, 4, 4],
- [5, 5, 5],
- [6, 6, 6]]]
-
-# tensor 't' is [7]
-# shape `[]` reshapes to a scalar
-reshape(t, []) ==> 7
-```
-END
-}
-op {
- graph_op_name: "ResizeArea"
- endpoint {
- name: "ResizeArea"
- }
- summary: "Resize `images` to `size` using area interpolation."
- description: <<END
-Input images can be of different types but output images are always float.
-
-Each output pixel is computed by first transforming the pixel's footprint into
-the input tensor and then averaging the pixels that intersect the footprint. An
-input pixel's contribution to the average is weighted by the fraction of its
-area that intersects the footprint. This is the same as OpenCV's INTER_AREA.
-END
-}
-op {
- graph_op_name: "ResizeBicubic"
- endpoint {
- name: "ResizeBicubic"
- }
- summary: "Resize `images` to `size` using bicubic interpolation."
- description: <<END
-Input images can be of different types but output images are always float.
-END
-}
-op {
- graph_op_name: "ResizeBicubicGrad"
- endpoint {
- name: "ResizeBicubicGrad"
- }
- summary: "Computes the gradient of bicubic interpolation."
-}
-op {
- graph_op_name: "ResizeBilinear"
- endpoint {
- name: "ResizeBilinear"
- }
- summary: "Resize `images` to `size` using bilinear interpolation."
- description: <<END
-Input images can be of different types but output images are always float.
-END
-}
-op {
- graph_op_name: "ResizeBilinearGrad"
- endpoint {
- name: "ResizeBilinearGrad"
- }
- summary: "Computes the gradient of bilinear interpolation."
-}
-op {
- graph_op_name: "ResizeNearestNeighbor"
- endpoint {
- name: "ResizeNearestNeighbor"
- }
- summary: "Resize `images` to `size` using nearest neighbor interpolation."
-}
-op {
- graph_op_name: "ResizeNearestNeighborGrad"
- endpoint {
- name: "ResizeNearestNeighborGrad"
- }
- summary: "Computes the gradient of nearest neighbor interpolation."
-}
-op {
- graph_op_name: "ResourceApplyAdadelta"
- endpoint {
- name: "ResourceApplyAdadelta"
- }
- summary: "Update \'*var\' according to the adadelta scheme."
- description: <<END
-accum = rho() * accum + (1 - rho()) * grad.square();
-update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad;
-update_accum = rho() * update_accum + (1 - rho()) * update.square();
-var -= update;
-END
-}
-op {
- graph_op_name: "ResourceApplyAdagrad"
- endpoint {
- name: "ResourceApplyAdagrad"
- }
- summary: "Update \'*var\' according to the adagrad scheme."
- description: <<END
-accum += grad * grad
-var -= lr * grad * (1 / sqrt(accum))
-END
-}
-op {
- graph_op_name: "ResourceApplyAdagradDA"
- endpoint {
- name: "ResourceApplyAdagradDA"
- }
- summary: "Update \'*var\' according to the proximal adagrad scheme."
-}
-op {
- graph_op_name: "ResourceApplyAdam"
- endpoint {
- name: "ResourceApplyAdam"
- }
- summary: "Update \'*var\' according to the Adam algorithm."
- description: <<END
-lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t)
-m_t <- beta1 * m_{t-1} + (1 - beta1) * g_t
-v_t <- beta2 * v_{t-1} + (1 - beta2) * g_t * g_t
-variable <- variable - lr_t * m_t / (sqrt(v_t) + epsilon)
-END
-}
-op {
- graph_op_name: "ResourceApplyCenteredRMSProp"
- endpoint {
- name: "ResourceApplyCenteredRMSProp"
- }
- summary: "Update \'*var\' according to the centered RMSProp algorithm."
- description: <<END
-The centered RMSProp algorithm uses an estimate of the centered second moment
-(i.e., the variance) for normalization, as opposed to regular RMSProp, which
-uses the (uncentered) second moment. This often helps with training, but is
-slightly more expensive in terms of computation and memory.
-
-Note that in dense implementation of this algorithm, mg, ms, and mom will
-update even if the grad is zero, but in this sparse implementation, mg, ms,
-and mom will not update in iterations during which the grad is zero.
-
-mean_square = decay * mean_square + (1-decay) * gradient ** 2
-mean_grad = decay * mean_grad + (1-decay) * gradient
-
-Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
-
-mg <- rho * mg_{t-1} + (1-rho) * grad
-ms <- rho * ms_{t-1} + (1-rho) * grad * grad
-mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon)
-var <- var - mom
-END
-}
-op {
- graph_op_name: "ResourceApplyFtrl"
- endpoint {
- name: "ResourceApplyFtrl"
- }
- summary: "Update \'*var\' according to the Ftrl-proximal scheme."
- description: <<END
-accum_new = accum + grad * grad
-linear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
-quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
-var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
-accum = accum_new
-END
-}
-op {
- graph_op_name: "ResourceApplyFtrlV2"
- endpoint {
- name: "ResourceApplyFtrlV2"
- }
- summary: "Update \'*var\' according to the Ftrl-proximal scheme."
- description: <<END
-grad_with_shrinkage = grad + 2 * l2_shrinkage * var
-accum_new = accum + grad_with_shrinkage * grad_with_shrinkage
-linear += grad_with_shrinkage +
- (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
-quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
-var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
-accum = accum_new
-END
-}
-op {
- graph_op_name: "ResourceApplyGradientDescent"
- endpoint {
- name: "ResourceApplyGradientDescent"
- }
- summary: "Update \'*var\' by subtracting \'alpha\' * \'delta\' from it."
-}
-op {
- graph_op_name: "ResourceApplyMomentum"
- endpoint {
- name: "ResourceApplyMomentum"
- }
- summary: "Update \'*var\' according to the momentum scheme. Set use_nesterov = True if you"
- description: <<END
-want to use Nesterov momentum.
-
-accum = accum * momentum + grad
-var -= lr * accum
-END
-}
-op {
- graph_op_name: "ResourceApplyProximalAdagrad"
- endpoint {
- name: "ResourceApplyProximalAdagrad"
- }
- summary: "Update \'*var\' and \'*accum\' according to FOBOS with Adagrad learning rate."
- description: <<END
-accum += grad * grad
-prox_v = var - lr * grad * (1 / sqrt(accum))
-var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
-END
-}
-op {
- graph_op_name: "ResourceApplyProximalGradientDescent"
- endpoint {
- name: "ResourceApplyProximalGradientDescent"
- }
- summary: "Update \'*var\' as FOBOS algorithm with fixed learning rate."
- description: <<END
-prox_v = var - alpha * delta
-var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
-END
-}
-op {
- graph_op_name: "ResourceApplyRMSProp"
- endpoint {
- name: "ResourceApplyRMSProp"
- }
- summary: "Update \'*var\' according to the RMSProp algorithm."
- description: <<END
-Note that in dense implementation of this algorithm, ms and mom will
-update even if the grad is zero, but in this sparse implementation, ms
-and mom will not update in iterations during which the grad is zero.
-
-mean_square = decay * mean_square + (1-decay) * gradient ** 2
-Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
-
-ms <- rho * ms_{t-1} + (1-rho) * grad * grad
-mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
-var <- var - mom
-END
-}
-op {
- graph_op_name: "ResourceSparseApplyAdadelta"
- endpoint {
- name: "ResourceSparseApplyAdadelta"
- }
- summary: "var: Should be from a Variable()."
-}
-op {
- graph_op_name: "ResourceSparseApplyAdagrad"
- endpoint {
- name: "ResourceSparseApplyAdagrad"
- }
- summary: "Update relevant entries in \'*var\' and \'*accum\' according to the adagrad scheme."
- description: <<END
-That is for rows we have grad for, we update var and accum as follows:
-accum += grad * grad
-var -= lr * grad * (1 / sqrt(accum))
-END
-}
-op {
- graph_op_name: "ResourceSparseApplyAdagradDA"
- endpoint {
- name: "ResourceSparseApplyAdagradDA"
- }
- summary: "Update entries in \'*var\' and \'*accum\' according to the proximal adagrad scheme."
-}
-op {
- graph_op_name: "ResourceSparseApplyCenteredRMSProp"
- endpoint {
- name: "ResourceSparseApplyCenteredRMSProp"
- }
- summary: "Update \'*var\' according to the centered RMSProp algorithm."
- description: <<END
-The centered RMSProp algorithm uses an estimate of the centered second moment
-(i.e., the variance) for normalization, as opposed to regular RMSProp, which
-uses the (uncentered) second moment. This often helps with training, but is
-slightly more expensive in terms of computation and memory.
-
-Note that in dense implementation of this algorithm, mg, ms, and mom will
-update even if the grad is zero, but in this sparse implementation, mg, ms,
-and mom will not update in iterations during which the grad is zero.
-
-mean_square = decay * mean_square + (1-decay) * gradient ** 2
-mean_grad = decay * mean_grad + (1-decay) * gradient
-Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
-
-ms <- rho * ms_{t-1} + (1-rho) * grad * grad
-mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
-var <- var - mom
-END
-}
-op {
- graph_op_name: "ResourceSparseApplyFtrl"
- endpoint {
- name: "ResourceSparseApplyFtrl"
- }
- summary: "Update relevant entries in \'*var\' according to the Ftrl-proximal scheme."
- description: <<END
-That is for rows we have grad for, we update var, accum and linear as follows:
-accum_new = accum + grad * grad
-linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
-quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
-var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
-accum = accum_new
-END
-}
-op {
- graph_op_name: "ResourceSparseApplyFtrlV2"
- endpoint {
- name: "ResourceSparseApplyFtrlV2"
- }
- summary: "Update relevant entries in \'*var\' according to the Ftrl-proximal scheme."
- description: <<END
-That is for rows we have grad for, we update var, accum and linear as follows:
-grad_with_shrinkage = grad + 2 * l2_shrinkage * var
-accum_new = accum + grad_with_shrinkage * grad_with_shrinkage
-linear += grad_with_shrinkage +
- (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
-quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
-var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
-accum = accum_new
-END
-}
-op {
- graph_op_name: "ResourceSparseApplyMomentum"
- endpoint {
- name: "ResourceSparseApplyMomentum"
- }
- summary: "Update relevant entries in \'*var\' and \'*accum\' according to the momentum scheme."
- description: <<END
-Set use_nesterov = True if you want to use Nesterov momentum.
-
-That is for rows we have grad for, we update var and accum as follows:
-
-accum = accum * momentum + grad
-var -= lr * accum
-END
-}
-op {
- graph_op_name: "ResourceSparseApplyProximalAdagrad"
- endpoint {
- name: "ResourceSparseApplyProximalAdagrad"
- }
- summary: "Sparse update entries in \'*var\' and \'*accum\' according to FOBOS algorithm."
- description: <<END
-That is for rows we have grad for, we update var and accum as follows:
-accum += grad * grad
-prox_v = var
-prox_v -= lr * grad * (1 / sqrt(accum))
-var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
-END
-}
-op {
- graph_op_name: "ResourceSparseApplyProximalGradientDescent"
- endpoint {
- name: "ResourceSparseApplyProximalGradientDescent"
- }
- summary: "Sparse update \'*var\' as FOBOS algorithm with fixed learning rate."
- description: <<END
-That is for rows we have grad for, we update var as follows:
-prox_v = var - alpha * grad
-var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
-END
-}
-op {
- graph_op_name: "ResourceSparseApplyRMSProp"
- endpoint {
- name: "ResourceSparseApplyRMSProp"
- }
- summary: "Update \'*var\' according to the RMSProp algorithm."
- description: <<END
-Note that in dense implementation of this algorithm, ms and mom will
-update even if the grad is zero, but in this sparse implementation, ms
-and mom will not update in iterations during which the grad is zero.
-
-mean_square = decay * mean_square + (1-decay) * gradient ** 2
-Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
-
-ms <- rho * ms_{t-1} + (1-rho) * grad * grad
-mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
-var <- var - mom
-END
-}
-op {
- graph_op_name: "ResourceStridedSliceAssign"
- endpoint {
- name: "ResourceStridedSliceAssign"
- }
- summary: "Assign `value` to the sliced l-value reference of `ref`."
- description: <<END
-The values of `value` are assigned to the positions in the variable
-`ref` that are selected by the slice parameters. The slice parameters
-`begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`.
-
-NOTE this op currently does not support broadcasting and so `value`'s
-shape must be exactly the shape produced by the slice of `ref`.
-END
-}
-op {
- graph_op_name: "Restore"
- endpoint {
- name: "Restore"
- }
- summary: "Restores a tensor from checkpoint files."
- description: <<END
-Reads a tensor stored in one or several files. If there are several files (for
-instance because a tensor was saved as slices), `file_pattern` may contain
-wildcard symbols (`*` and `?`) in the filename portion only, not in the
-directory portion.
-
-If a `file_pattern` matches several files, `preferred_shard` can be used to hint
-in which file the requested tensor is likely to be found. This op will first
-open the file at index `preferred_shard` in the list of matching files and try
-to restore tensors from that file. Only if some tensors or tensor slices are
-not found in that first file, then the Op opens all the files. Setting
-`preferred_shard` to match the value passed as the `shard` input
-of a matching `Save` Op may speed up Restore. This attribute only affects
-performance, not correctness. The default value -1 means files are processed in
-order.
-
-See also `RestoreSlice`.
-END
-}
-op {
- graph_op_name: "RestoreIterator"
- endpoint {
- name: "RestoreIterator"
- }
- summary: "Restores the state of the `iterator` from the checkpoint saved at `path` using \"SaveIterator\"."
-}
-op {
- graph_op_name: "RestoreSlice"
- endpoint {
- name: "RestoreSlice"
- }
- summary: "Restores a tensor from checkpoint files."
- description: <<END
-This is like `Restore` except that restored tensor can be listed as filling
-only a slice of a larger tensor. `shape_and_slice` specifies the shape of the
-larger tensor and the slice that the restored tensor covers.
-
-The `shape_and_slice` input has the same format as the
-elements of the `shapes_and_slices` input of the `SaveSlices` op.
-END
-}
-op {
- graph_op_name: "RestoreV2"
- endpoint {
- name: "RestoreV2"
- }
- summary: "Restores tensors from a V2 checkpoint."
- description: <<END
-For backward compatibility with the V1 format, this Op currently allows
-restoring from a V1 checkpoint as well:
- - This Op first attempts to find the V2 index file pointed to by "prefix", and
- if found proceed to read it as a V2 checkpoint;
- - Otherwise the V1 read path is invoked.
-Relying on this behavior is not recommended, as the ability to fall back to read
-V1 might be deprecated and eventually removed.
-
-By default, restores the named tensors in full. If the caller wishes to restore
-specific slices of stored tensors, "shape_and_slices" should be non-empty
-strings and correspondingly well-formed.
-
-Callers must ensure all the named tensors are indeed stored in the checkpoint.
-END
-}
-op {
- graph_op_name: "Reverse"
- endpoint {
- name: "Reverse"
- }
- summary: "Reverses specific dimensions of a tensor."
- description: <<END
-Given a `tensor`, and a `bool` tensor `dims` representing the dimensions
-of `tensor`, this operation reverses each dimension i of `tensor` where
-`dims[i]` is `True`.
-
-`tensor` can have up to 8 dimensions. The number of dimensions
-of `tensor` must equal the number of elements in `dims`. In other words:
-
-`rank(tensor) = size(dims)`
-
-For example:
-
-```
-# tensor 't' is [[[[ 0, 1, 2, 3],
-# [ 4, 5, 6, 7],
-# [ 8, 9, 10, 11]],
-# [[12, 13, 14, 15],
-# [16, 17, 18, 19],
-# [20, 21, 22, 23]]]]
-# tensor 't' shape is [1, 2, 3, 4]
-
-# 'dims' is [False, False, False, True]
-reverse(t, dims) ==> [[[[ 3, 2, 1, 0],
- [ 7, 6, 5, 4],
- [ 11, 10, 9, 8]],
- [[15, 14, 13, 12],
- [19, 18, 17, 16],
- [23, 22, 21, 20]]]]
-
-# 'dims' is [False, True, False, False]
-reverse(t, dims) ==> [[[[12, 13, 14, 15],
- [16, 17, 18, 19],
- [20, 21, 22, 23]
- [[ 0, 1, 2, 3],
- [ 4, 5, 6, 7],
- [ 8, 9, 10, 11]]]]
-
-# 'dims' is [False, False, True, False]
-reverse(t, dims) ==> [[[[8, 9, 10, 11],
- [4, 5, 6, 7],
- [0, 1, 2, 3]]
- [[20, 21, 22, 23],
- [16, 17, 18, 19],
- [12, 13, 14, 15]]]]
-```
-END
-}
-op {
- graph_op_name: "ReverseSequence"
- endpoint {
- name: "ReverseSequence"
- }
- summary: "Reverses variable length slices."
- description: <<END
-This op first slices `input` along the dimension `batch_dim`, and for each
-slice `i`, reverses the first `seq_lengths[i]` elements along
-the dimension `seq_dim`.
-
-The elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_dim]`,
-and `seq_lengths` must be a vector of length `input.dims[batch_dim]`.
-
-The output slice `i` along dimension `batch_dim` is then given by input
-slice `i`, with the first `seq_lengths[i]` slices along dimension
-`seq_dim` reversed.
-
-For example:
-
-```
-# Given this:
-batch_dim = 0
-seq_dim = 1
-input.dims = (4, 8, ...)
-seq_lengths = [7, 2, 3, 5]
-
-# then slices of input are reversed on seq_dim, but only up to seq_lengths:
-output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...]
-output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...]
-output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...]
-output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...]
-
-# while entries past seq_lens are copied through:
-output[0, 7:, :, ...] = input[0, 7:, :, ...]
-output[1, 2:, :, ...] = input[1, 2:, :, ...]
-output[2, 3:, :, ...] = input[2, 3:, :, ...]
-output[3, 2:, :, ...] = input[3, 2:, :, ...]
-```
-
-In contrast, if:
-
-```
-# Given this:
-batch_dim = 2
-seq_dim = 0
-input.dims = (8, ?, 4, ...)
-seq_lengths = [7, 2, 3, 5]
-
-# then slices of input are reversed on seq_dim, but only up to seq_lengths:
-output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...]
-output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...]
-output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...]
-output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...]
-
-# while entries past seq_lens are copied through:
-output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...]
-output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...]
-output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...]
-output[2:, :, 3, :, ...] = input[2:, :, 3, :, ...]
-```
-END
-}
-op {
- graph_op_name: "ReverseV2"
- endpoint {
- name: "ReverseV2"
- }
- summary: "Reverses specific dimensions of a tensor."
- description: <<END
-NOTE `tf.reverse` has now changed behavior in preparation for 1.0.
-`tf.reverse_v2` is currently an alias that will be deprecated before TF 1.0.
-
-Given a `tensor`, and a `int32` tensor `axis` representing the set of
-dimensions of `tensor` to reverse. This operation reverses each dimension
-`i` for which there exists `j` s.t. `axis[j] == i`.
-
-`tensor` can have up to 8 dimensions. The number of dimensions specified
-in `axis` may be 0 or more entries. If an index is specified more than
-once, an InvalidArgument error is raised.
-
-For example:
-
-```
-# tensor 't' is [[[[ 0, 1, 2, 3],
-# [ 4, 5, 6, 7],
-# [ 8, 9, 10, 11]],
-# [[12, 13, 14, 15],
-# [16, 17, 18, 19],
-# [20, 21, 22, 23]]]]
-# tensor 't' shape is [1, 2, 3, 4]
-
-# 'dims' is [3] or 'dims' is -1
-reverse(t, dims) ==> [[[[ 3, 2, 1, 0],
- [ 7, 6, 5, 4],
- [ 11, 10, 9, 8]],
- [[15, 14, 13, 12],
- [19, 18, 17, 16],
- [23, 22, 21, 20]]]]
-
-# 'dims' is '[1]' (or 'dims' is '[-3]')
-reverse(t, dims) ==> [[[[12, 13, 14, 15],
- [16, 17, 18, 19],
- [20, 21, 22, 23]
- [[ 0, 1, 2, 3],
- [ 4, 5, 6, 7],
- [ 8, 9, 10, 11]]]]
-
-# 'dims' is '[2]' (or 'dims' is '[-2]')
-reverse(t, dims) ==> [[[[8, 9, 10, 11],
- [4, 5, 6, 7],
- [0, 1, 2, 3]]
- [[20, 21, 22, 23],
- [16, 17, 18, 19],
- [12, 13, 14, 15]]]]
-```
-END
-}
-op {
- graph_op_name: "Rint"
- endpoint {
- name: "Rint"
- }
- summary: "Returns element-wise integer closest to x."
- description: <<END
-If the result is midway between two representable values,
-the even representable is chosen.
-For example:
-
-```
-rint(-1.5) ==> -2.0
-rint(0.5000001) ==> 1.0
-rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.]
-```
-END
-}
-op {
- graph_op_name: "Round"
- endpoint {
- name: "Round"
- }
- summary: "Rounds the values of a tensor to the nearest integer, element-wise."
- description: <<END
-Rounds half to even. Also known as banker's rounding. If you want to round
-according to the current system rounding mode use std::rint.
-END
-}
-op {
- graph_op_name: "Rsqrt"
- endpoint {
- name: "Rsqrt"
- }
- summary: "Computes reciprocal of square root of x element-wise."
- description: <<END
-I.e., \\(y = 1 / \sqrt{x}\\).
-END
-}
-op {
- graph_op_name: "RsqrtGrad"
- endpoint {
- name: "RsqrtGrad"
- }
- summary: "Computes the gradient for the rsqrt of `x` wrt its input."
- description: <<END
-Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and `dy`
-is the corresponding input gradient.
-END
-}
diff --git a/tensorflow/core/api_def/base_api/api_def_RFFT.pbtxt b/tensorflow/core/api_def/base_api/api_def_RFFT.pbtxt
new file mode 100644
index 0000000000..9bf680e2ca
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_RFFT.pbtxt
@@ -0,0 +1,40 @@
+op {
+ graph_op_name: "RFFT"
+ in_arg {
+ name: "input"
+ description: <<END
+A float32 tensor.
+END
+ }
+ in_arg {
+ name: "fft_length"
+ description: <<END
+An int32 tensor of shape [1]. The FFT length.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+A complex64 tensor of the same rank as `input`. The inner-most
+ dimension of `input` is replaced with the `fft_length / 2 + 1` unique
+ frequency components of its 1D Fourier transform.
+
+@compatibility(numpy)
+Equivalent to np.fft.rfft
+@end_compatibility
+END
+ }
+ summary: "Real-valued fast Fourier transform."
+ description: <<END
+Computes the 1-dimensional discrete Fourier transform of a real-valued signal
+over the inner-most dimension of `input`.
+
+Since the DFT of a real signal is Hermitian-symmetric, `RFFT` only returns the
+`fft_length / 2 + 1` unique components of the FFT: the zero-frequency term,
+followed by the `fft_length / 2` positive-frequency terms.
+
+Along the axis `RFFT` is computed on, if `fft_length` is smaller than the
+corresponding dimension of `input`, the dimension is cropped. If it is larger,
+the dimension is padded with zeros.
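+
+For example, a sketch of the cropping/padding behavior, assuming the
+TensorFlow 1.x Python wrapper `tf.spectral.rfft`:
+
+```python
+import numpy as np
+import tensorflow as tf
+
+signal = tf.constant(np.arange(8, dtype=np.float32))
+full = tf.spectral.rfft(signal)                     # fft_length = 8
+padded = tf.spectral.rfft(signal, fft_length=[16])  # zero-padded to 16
+
+with tf.Session() as sess:
+    print(sess.run(full).shape)    # ==> (5,)  i.e. 8 / 2 + 1 bins
+    print(sess.run(padded).shape)  # ==> (9,)  i.e. 16 / 2 + 1 bins
+```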
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_RFFT2D.pbtxt b/tensorflow/core/api_def/base_api/api_def_RFFT2D.pbtxt
new file mode 100644
index 0000000000..a901ee704c
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_RFFT2D.pbtxt
@@ -0,0 +1,42 @@
+op {
+ graph_op_name: "RFFT2D"
+ in_arg {
+ name: "input"
+ description: <<END
+A float32 tensor.
+END
+ }
+ in_arg {
+ name: "fft_length"
+ description: <<END
+An int32 tensor of shape [2]. The FFT length for each dimension.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+A complex64 tensor of the same rank as `input`. The inner-most 2
+ dimensions of `input` are replaced with their 2D Fourier transform. The
+ inner-most dimension contains `fft_length / 2 + 1` unique frequency
+ components.
+
+@compatibility(numpy)
+Equivalent to np.fft.rfft2
+@end_compatibility
+END
+ }
+ summary: "2D real-valued fast Fourier transform."
+ description: <<END
+Computes the 2-dimensional discrete Fourier transform of a real-valued signal
+over the inner-most 2 dimensions of `input`.
+
+Since the DFT of a real signal is Hermitian-symmetric, `RFFT2D` only returns the
+`fft_length / 2 + 1` unique components of the FFT for the inner-most dimension
+of `output`: the zero-frequency term, followed by the `fft_length / 2`
+positive-frequency terms.
+
+Along each axis `RFFT2D` is computed on, if `fft_length` is smaller than the
+corresponding dimension of `input`, the dimension is cropped. If it is larger,
+the dimension is padded with zeros.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_RFFT3D.pbtxt b/tensorflow/core/api_def/base_api/api_def_RFFT3D.pbtxt
new file mode 100644
index 0000000000..d4a3ad667b
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_RFFT3D.pbtxt
@@ -0,0 +1,42 @@
+op {
+ graph_op_name: "RFFT3D"
+ in_arg {
+ name: "input"
+ description: <<END
+A float32 tensor.
+END
+ }
+ in_arg {
+ name: "fft_length"
+ description: <<END
+An int32 tensor of shape [3]. The FFT length for each dimension.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+A complex64 tensor of the same rank as `input`. The inner-most 3
+ dimensions of `input` are replaced with their 3D Fourier transform. The
+ inner-most dimension contains `fft_length / 2 + 1` unique frequency
+ components.
+
+@compatibility(numpy)
+Equivalent to np.fft.rfftn with 3 dimensions.
+@end_compatibility
+END
+ }
+ summary: "3D real-valued fast Fourier transform."
+ description: <<END
+Computes the 3-dimensional discrete Fourier transform of a real-valued signal
+over the inner-most 3 dimensions of `input`.
+
+Since the DFT of a real signal is Hermitian-symmetric, `RFFT3D` only returns the
+`fft_length / 2 + 1` unique components of the FFT for the inner-most dimension
+of `output`: the zero-frequency term, followed by the `fft_length / 2`
+positive-frequency terms.
+
+Along each axis `RFFT3D` is computed on, if `fft_length` is smaller than the
+corresponding dimension of `input`, the dimension is cropped. If it is larger,
+the dimension is padded with zeros.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_RGBToHSV.pbtxt b/tensorflow/core/api_def/base_api/api_def_RGBToHSV.pbtxt
new file mode 100644
index 0000000000..08629610ed
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_RGBToHSV.pbtxt
@@ -0,0 +1,25 @@
+op {
+ graph_op_name: "RGBToHSV"
+ in_arg {
+ name: "images"
+ description: <<END
+1-D or higher rank. RGB data to convert. Last dimension must be size 3.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+`images` converted to HSV.
+END
+ }
+ summary: "Converts one or more images from RGB to HSV."
+ description: <<END
+Outputs a tensor of the same shape as the `images` tensor, containing the HSV
+value of the pixels. The output is only well defined if the values in `images`
+are in `[0,1]`.
+
+`output[..., 0]` contains hue, `output[..., 1]` contains saturation, and
+`output[..., 2]` contains value. All HSV values are in `[0,1]`. A hue of 0
+corresponds to pure red, hue 1/3 is pure green, and 2/3 is pure blue.
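+
+For example (a sketch assuming the `tf.image.rgb_to_hsv` Python wrapper):
+
+```python
+import tensorflow as tf
+
+# One image row with pure red, green, and blue pixels in [0, 1].
+rgb = tf.constant([[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]])
+hsv = tf.image.rgb_to_hsv(rgb)
+
+with tf.Session() as sess:
+    # Hues come out as 0 (red), ~1/3 (green), ~2/3 (blue);
+    # saturation and value are 1 for all three pixels.
+    print(sess.run(hsv))
+```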
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_RandomCrop.pbtxt b/tensorflow/core/api_def/base_api/api_def_RandomCrop.pbtxt
new file mode 100644
index 0000000000..cd549dda14
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_RandomCrop.pbtxt
@@ -0,0 +1,44 @@
+op {
+ graph_op_name: "RandomCrop"
+ in_arg {
+ name: "image"
+ description: <<END
+3-D of shape `[height, width, channels]`.
+END
+ }
+ in_arg {
+ name: "size"
+ description: <<END
+1-D of length 2 containing: `crop_height`, `crop_width`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+3-D of shape `[crop_height, crop_width, channels]`.
+END
+ }
+ attr {
+ name: "seed"
+ description: <<END
+If either `seed` or `seed2` is set to be non-zero, the random number
+generator is seeded by the given seed. Otherwise, it is seeded by a
+random seed.
+END
+ }
+ attr {
+ name: "seed2"
+ description: <<END
+A second seed to avoid seed collision.
+END
+ }
+ summary: "Randomly crop `image`."
+ description: <<END
+`size` is a 1-D int64 tensor with 2 elements representing the crop height and
+width. The values must be non negative.
+
+This Op picks a random location in `image` and crops a `height` by `width`
+rectangle from that location. The random location is picked so the cropped
+area will fit inside the original image.
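+
+A related sketch: the `tf.random_crop` Python wrapper implements the same
+crop semantics, though it takes a full `[crop_height, crop_width, channels]`
+size rather than this op's 2-element `size` input:
+
+```python
+import tensorflow as tf
+
+image = tf.zeros([32, 32, 3])  # [height, width, channels]
+crop = tf.random_crop(image, size=[8, 8, 3], seed=42)
+
+with tf.Session() as sess:
+    # An 8x8 patch taken at a random offset that fits inside the image.
+    print(sess.run(crop).shape)  # ==> (8, 8, 3)
+```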
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_RandomGamma.pbtxt b/tensorflow/core/api_def/base_api/api_def_RandomGamma.pbtxt
new file mode 100644
index 0000000000..0a10392b6d
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_RandomGamma.pbtxt
@@ -0,0 +1,45 @@
+op {
+ graph_op_name: "RandomGamma"
+ in_arg {
+ name: "shape"
+ description: <<END
+1-D integer tensor. Shape of independent samples to draw from each
+distribution described by the shape parameters given in alpha.
+END
+ }
+ in_arg {
+ name: "alpha"
+ description: <<END
+A tensor in which each scalar is a "shape" parameter describing the
+associated gamma distribution.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+A tensor with shape `shape + shape(alpha)`. Each slice
+`[:, ..., :, i0, i1, ...iN]` contains the samples drawn for
+`alpha[i0, i1, ...iN]`. The dtype of the output matches the dtype of alpha.
+END
+ }
+ attr {
+ name: "seed"
+ description: <<END
+If either `seed` or `seed2` is set to be non-zero, the random number
+generator is seeded by the given seed. Otherwise, it is seeded by a
+random seed.
+END
+ }
+ attr {
+ name: "seed2"
+ description: <<END
+A second seed to avoid seed collision.
+END
+ }
+ summary: "Outputs random values from the Gamma distribution(s) described by alpha."
+ description: <<END
+This op uses the algorithm by Marsaglia et al. to acquire samples via
+transformation-rejection from pairs of uniform and normal random variables.
+See http://dl.acm.org/citation.cfm?id=358414
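+
+For example, a minimal sketch assuming the `tf.random_gamma` Python wrapper:
+
+```python
+import tensorflow as tf
+
+# 10 independent samples from each of two Gamma distributions with
+# shape parameters 0.5 and 1.5: output shape is [10, 2].
+samples = tf.random_gamma(shape=[10], alpha=[0.5, 1.5], seed=42)
+
+with tf.Session() as sess:
+    print(sess.run(samples).shape)  # ==> (10, 2)
+```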
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_RandomPoisson.pbtxt b/tensorflow/core/api_def/base_api/api_def_RandomPoisson.pbtxt
new file mode 100644
index 0000000000..b75ecd2e19
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_RandomPoisson.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "RandomPoisson"
+ summary: "Use RandomPoissonV2 instead."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_RandomPoissonV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_RandomPoissonV2.pbtxt
new file mode 100644
index 0000000000..3aa8c30294
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_RandomPoissonV2.pbtxt
@@ -0,0 +1,51 @@
+op {
+ graph_op_name: "RandomPoissonV2"
+ in_arg {
+ name: "shape"
+ description: <<END
+1-D integer tensor. Shape of independent samples to draw from each
+distribution described by the shape parameters given in rate.
+END
+ }
+ in_arg {
+ name: "rate"
+ description: <<END
+A tensor in which each scalar is a "rate" parameter describing the
+associated poisson distribution.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+A tensor with shape `shape + shape(rate)`. Each slice
+`[:, ..., :, i0, i1, ...iN]` contains the samples drawn for
+`rate[i0, i1, ...iN]`.
+END
+ }
+ attr {
+ name: "seed"
+ description: <<END
+If either `seed` or `seed2` are set to be non-zero, the random number
+generator is seeded by the given seed. Otherwise, it is seeded by a
+random seed.
+END
+ }
+ attr {
+ name: "seed2"
+ description: <<END
+A second seed to avoid seed collision.
+END
+ }
+ summary: "Outputs random values from the Poisson distribution(s) described by rate."
+ description: <<END
+This op uses two algorithms, depending on rate. If rate >= 10, then
+the algorithm by Hormann is used to acquire samples via
+transformation-rejection.
+See http://www.sciencedirect.com/science/article/pii/0167668793909974.
+
+Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform
+random variables.
+See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer
+Programming, Volume 2. Addison Wesley
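+
+For example, a minimal sketch (assuming the Python endpoint
+`tf.random_poisson`, which takes the rate first):
+
+```python
+import tensorflow as tf
+
+rate = tf.constant([0.5, 20.0])  # one small and one large rate
+# Five samples per rate; output shape is [5, 2]. Internally the kernel uses
+# Knuth's method for rate < 10 and Hormann's method otherwise.
+samples = tf.random_poisson(rate, shape=[5], seed=1)
+```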
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_RandomShuffle.pbtxt b/tensorflow/core/api_def/base_api/api_def_RandomShuffle.pbtxt
new file mode 100644
index 0000000000..7490361712
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_RandomShuffle.pbtxt
@@ -0,0 +1,42 @@
+op {
+ graph_op_name: "RandomShuffle"
+ in_arg {
+ name: "value"
+ description: <<END
+The tensor to be shuffled.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+A tensor of same shape and type as `value`, shuffled along its first
+dimension.
+END
+ }
+ attr {
+ name: "seed"
+ description: <<END
+If either `seed` or `seed2` are set to be non-zero, the random number
+generator is seeded by the given seed. Otherwise, it is seeded by a
+random seed.
+END
+ }
+ attr {
+ name: "seed2"
+ description: <<END
+A second seed to avoid seed collision.
+END
+ }
+ summary: "Randomly shuffles a tensor along its first dimension."
+ description: <<END
+The tensor is shuffled along dimension 0, such that each `value[j]` is mapped
+to one and only one `output[i]`. For example, a mapping that might occur for a
+3x2 tensor is:
+
+```
+[[1, 2], [[5, 6],
+ [3, 4], ==> [1, 2],
+ [5, 6]] [3, 4]]
+```
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_RandomShuffleQueue.pbtxt b/tensorflow/core/api_def/base_api/api_def_RandomShuffleQueue.pbtxt
new file mode 100644
index 0000000000..258ef00b5c
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_RandomShuffleQueue.pbtxt
@@ -0,0 +1,68 @@
+op {
+ graph_op_name: "RandomShuffleQueue"
+ visibility: SKIP
+ out_arg {
+ name: "handle"
+ description: <<END
+The handle to the queue.
+END
+ }
+ attr {
+ name: "component_types"
+ description: <<END
+The type of each component in a value.
+END
+ }
+ attr {
+ name: "shapes"
+ description: <<END
+The shape of each component in a value. The length of this attr must
+be either 0 or the same as the length of component_types. If the length of
+this attr is 0, the shapes of queue elements are not constrained, and
+only one element may be dequeued at a time.
+END
+ }
+ attr {
+ name: "capacity"
+ description: <<END
+The upper bound on the number of elements in this queue.
+Negative numbers mean no limit.
+END
+ }
+ attr {
+ name: "min_after_dequeue"
+ description: <<END
+Dequeue will block unless there would be this
+many elements after the dequeue or the queue is closed. This
+ensures a minimum level of mixing of elements.
+END
+ }
+ attr {
+ name: "seed"
+ description: <<END
+If either seed or seed2 is set to be non-zero, the random number
+generator is seeded by the given seed. Otherwise, a random seed is used.
+END
+ }
+ attr {
+ name: "seed2"
+ description: <<END
+A second seed to avoid seed collision.
+END
+ }
+ attr {
+ name: "container"
+ description: <<END
+If non-empty, this queue is placed in the given container.
+Otherwise, a default container is used.
+END
+ }
+ attr {
+ name: "shared_name"
+ description: <<END
+If non-empty, this queue will be shared under the given name
+across multiple sessions.
+END
+ }
+ summary: "A queue that randomizes the order of elements."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_RandomShuffleQueueV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_RandomShuffleQueueV2.pbtxt
new file mode 100644
index 0000000000..bb5a0fb8ed
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_RandomShuffleQueueV2.pbtxt
@@ -0,0 +1,70 @@
+op {
+ graph_op_name: "RandomShuffleQueueV2"
+ endpoint {
+ name: "RandomShuffleQueue"
+ }
+ out_arg {
+ name: "handle"
+ description: <<END
+The handle to the queue.
+END
+ }
+ attr {
+ name: "component_types"
+ description: <<END
+The type of each component in a value.
+END
+ }
+ attr {
+ name: "shapes"
+ description: <<END
+The shape of each component in a value. The length of this attr must
+be either 0 or the same as the length of component_types. If the length of
+this attr is 0, the shapes of queue elements are not constrained, and
+only one element may be dequeued at a time.
+END
+ }
+ attr {
+ name: "capacity"
+ description: <<END
+The upper bound on the number of elements in this queue.
+Negative numbers mean no limit.
+END
+ }
+ attr {
+ name: "min_after_dequeue"
+ description: <<END
+Dequeue will block unless there would be this
+many elements after the dequeue or the queue is closed. This
+ensures a minimum level of mixing of elements.
+END
+ }
+ attr {
+ name: "seed"
+ description: <<END
+If either seed or seed2 is set to be non-zero, the random number
+generator is seeded by the given seed. Otherwise, a random seed is used.
+END
+ }
+ attr {
+ name: "seed2"
+ description: <<END
+A second seed to avoid seed collision.
+END
+ }
+ attr {
+ name: "container"
+ description: <<END
+If non-empty, this queue is placed in the given container.
+Otherwise, a default container is used.
+END
+ }
+ attr {
+ name: "shared_name"
+ description: <<END
+If non-empty, this queue will be shared under the given name
+across multiple sessions.
+END
+ }
+ summary: "A queue that randomizes the order of elements."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_RandomStandardNormal.pbtxt b/tensorflow/core/api_def/base_api/api_def_RandomStandardNormal.pbtxt
new file mode 100644
index 0000000000..d534785b14
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_RandomStandardNormal.pbtxt
@@ -0,0 +1,42 @@
+op {
+ graph_op_name: "RandomStandardNormal"
+ endpoint {
+ name: "RandomNormal"
+ }
+ in_arg {
+ name: "shape"
+ description: <<END
+The shape of the output tensor.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+A tensor of the specified shape filled with random normal values.
+END
+ }
+ attr {
+ name: "seed"
+ description: <<END
+If either `seed` or `seed2` are set to be non-zero, the random number
+generator is seeded by the given seed. Otherwise, it is seeded by a
+random seed.
+END
+ }
+ attr {
+ name: "seed2"
+ description: <<END
+A second seed to avoid seed collision.
+END
+ }
+ attr {
+ name: "dtype"
+ description: <<END
+The type of the output.
+END
+ }
+ summary: "Outputs random values from a normal distribution."
+ description: <<END
+The generated values will have mean 0 and standard deviation 1.
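+
+For example, a minimal sketch (assuming the Python endpoint
+`tf.random_normal`; shifting and scaling yields other parameters):
+
+```python
+import tensorflow as tf
+
+z = tf.random_normal([2, 3], seed=1)  # 2x3 draws from N(0, 1)
+x = 5.0 + 2.0 * z                     # draws from N(mean=5, stddev=2)
+```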
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_RandomUniform.pbtxt b/tensorflow/core/api_def/base_api/api_def_RandomUniform.pbtxt
new file mode 100644
index 0000000000..148a5b1c9a
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_RandomUniform.pbtxt
@@ -0,0 +1,40 @@
+op {
+ graph_op_name: "RandomUniform"
+ in_arg {
+ name: "shape"
+ description: <<END
+The shape of the output tensor.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+A tensor of the specified shape filled with uniform random values.
+END
+ }
+ attr {
+ name: "seed"
+ description: <<END
+If either `seed` or `seed2` are set to be non-zero, the random number
+generator is seeded by the given seed. Otherwise, it is seeded by a
+random seed.
+END
+ }
+ attr {
+ name: "seed2"
+ description: <<END
+A second seed to avoid seed collision.
+END
+ }
+ attr {
+ name: "dtype"
+ description: <<END
+The type of the output.
+END
+ }
+ summary: "Outputs random values from a uniform distribution."
+ description: <<END
+The generated values follow a uniform distribution in the range `[0, 1)`. The
+lower bound 0 is included in the range, while the upper bound 1 is excluded.
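+
+For example, a minimal sketch (assuming the Python endpoint
+`tf.random_uniform`):
+
+```python
+import tensorflow as tf
+
+u = tf.random_uniform([3], seed=1)  # floats in [0, 1)
+scaled = -1.0 + 2.0 * u             # rescaled to [-1, 1)
+```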
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_RandomUniformInt.pbtxt b/tensorflow/core/api_def/base_api/api_def_RandomUniformInt.pbtxt
new file mode 100644
index 0000000000..76a8f4b3e4
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_RandomUniformInt.pbtxt
@@ -0,0 +1,51 @@
+op {
+ graph_op_name: "RandomUniformInt"
+ in_arg {
+ name: "shape"
+ description: <<END
+The shape of the output tensor.
+END
+ }
+ in_arg {
+ name: "minval"
+ description: <<END
+0-D. Inclusive lower bound on the generated integers.
+END
+ }
+ in_arg {
+ name: "maxval"
+ description: <<END
+0-D. Exclusive upper bound on the generated integers.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+A tensor of the specified shape filled with uniform random integers.
+END
+ }
+ attr {
+ name: "seed"
+ description: <<END
+If either `seed` or `seed2` are set to be non-zero, the random number
+generator is seeded by the given seed. Otherwise, it is seeded by a
+random seed.
+END
+ }
+ attr {
+ name: "seed2"
+ description: <<END
+A second seed to avoid seed collision.
+END
+ }
+ summary: "Outputs random integers from a uniform distribution."
+ description: <<END
+The generated values are uniform integers in the range `[minval, maxval)`.
+The lower bound `minval` is included in the range, while the upper bound
+`maxval` is excluded.
+
+The random integers are slightly biased unless `maxval - minval` is an exact
+power of two. The bias is small for values of `maxval - minval` significantly
+smaller than the range of the output (either `2^32` or `2^64`).
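+
+For example, a minimal sketch (assuming the Python endpoint
+`tf.random_uniform` with an integer dtype):
+
+```python
+import tensorflow as tf
+
+# Integers in [0, 10); since 10 - 0 is not a power of two, the draws carry
+# the slight modulo bias described above (negligible at this magnitude).
+d = tf.random_uniform([4], minval=0, maxval=10, dtype=tf.int32, seed=1)
+```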
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Range.pbtxt b/tensorflow/core/api_def/base_api/api_def_Range.pbtxt
new file mode 100644
index 0000000000..cf1021ccfb
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Range.pbtxt
@@ -0,0 +1,41 @@
+op {
+ graph_op_name: "Range"
+ in_arg {
+ name: "start"
+ description: <<END
+0-D (scalar). First entry in the sequence.
+END
+ }
+ in_arg {
+ name: "limit"
+ description: <<END
+0-D (scalar). Upper limit of sequence, exclusive.
+END
+ }
+ in_arg {
+ name: "delta"
+ description: <<END
+0-D (scalar). Optional. Default is 1. Number that increments `start`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+1-D.
+END
+ }
+ summary: "Creates a sequence of numbers."
+ description: <<END
+This operation creates a sequence of numbers that begins at `start` and
+extends by increments of `delta` up to but not including `limit`.
+
+For example:
+
+```
+# 'start' is 3
+# 'limit' is 18
+# 'delta' is 3
+tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
+```
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_RangeDataset.pbtxt b/tensorflow/core/api_def/base_api/api_def_RangeDataset.pbtxt
new file mode 100644
index 0000000000..a9e14b8a05
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_RangeDataset.pbtxt
@@ -0,0 +1,22 @@
+op {
+ graph_op_name: "RangeDataset"
+ in_arg {
+ name: "start"
+ description: <<END
+Corresponds to `start` in Python's `xrange()`.
+END
+ }
+ in_arg {
+ name: "stop"
+ description: <<END
+Corresponds to `stop` in Python's `xrange()`.
+END
+ }
+ in_arg {
+ name: "step"
+ description: <<END
+Corresponds to `step` in Python's `xrange()`.
+END
+ }
+ summary: "Creates a dataset with a range of values. Corresponds to python\'s xrange."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Rank.pbtxt b/tensorflow/core/api_def/base_api/api_def_Rank.pbtxt
new file mode 100644
index 0000000000..ec1c61671d
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Rank.pbtxt
@@ -0,0 +1,19 @@
+op {
+ graph_op_name: "Rank"
+ summary: "Returns the rank of a tensor."
+ description: <<END
+This operation returns an integer representing the rank of `input`.
+
+For example:
+
+```
+# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
+# shape of tensor 't' is [2, 2, 3]
+rank(t) ==> 3
+```
+
+**Note**: The rank of a tensor is not the same as the rank of a matrix. The rank
+of a tensor is the number of indices required to uniquely select each element
+of the tensor. Rank is also known as "order", "degree", or "ndims."
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ReadFile.pbtxt b/tensorflow/core/api_def/base_api/api_def_ReadFile.pbtxt
new file mode 100644
index 0000000000..6161453d47
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ReadFile.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "ReadFile"
+ summary: "Reads and outputs the entire contents of the input filename."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ReadVariableOp.pbtxt b/tensorflow/core/api_def/base_api/api_def_ReadVariableOp.pbtxt
new file mode 100644
index 0000000000..eaa41b462c
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ReadVariableOp.pbtxt
@@ -0,0 +1,24 @@
+op {
+ graph_op_name: "ReadVariableOp"
+ in_arg {
+ name: "resource"
+ description: <<END
+handle to the resource in which to store the variable.
+END
+ }
+ attr {
+ name: "dtype"
+ description: <<END
+the dtype of the value.
+END
+ }
+ summary: "Reads the value of a variable."
+ description: <<END
+The tensor returned by this operation is immutable.
+
+The value returned by this operation is guaranteed to be influenced by all the
+writes on which this operation depends directly or indirectly, and to not be
+influenced by any of the writes which depend directly or indirectly on this
+operation.
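+
+For example, a hedged sketch (assuming a resource variable created with
+`use_resource=True`, whose `read_value()` lowers to this op):
+
+```python
+import tensorflow as tf
+
+v = tf.get_variable("v", initializer=1.0, use_resource=True)
+assign = v.assign_add(1.0)
+with tf.control_dependencies([assign]):
+    # This read depends on the assignment above, so it observes its effect.
+    r = v.read_value()
+```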
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ReaderNumRecordsProduced.pbtxt b/tensorflow/core/api_def/base_api/api_def_ReaderNumRecordsProduced.pbtxt
new file mode 100644
index 0000000000..27c74890f2
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ReaderNumRecordsProduced.pbtxt
@@ -0,0 +1,15 @@
+op {
+ graph_op_name: "ReaderNumRecordsProduced"
+ visibility: SKIP
+ in_arg {
+ name: "reader_handle"
+ description: <<END
+Handle to a Reader.
+END
+ }
+ summary: "Returns the number of records this Reader has produced."
+ description: <<END
+This is the same as the number of ReaderRead executions that have
+succeeded.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ReaderNumRecordsProducedV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_ReaderNumRecordsProducedV2.pbtxt
new file mode 100644
index 0000000000..caf4f6b903
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ReaderNumRecordsProducedV2.pbtxt
@@ -0,0 +1,17 @@
+op {
+ graph_op_name: "ReaderNumRecordsProducedV2"
+ endpoint {
+ name: "ReaderNumRecordsProduced"
+ }
+ in_arg {
+ name: "reader_handle"
+ description: <<END
+Handle to a Reader.
+END
+ }
+ summary: "Returns the number of records this Reader has produced."
+ description: <<END
+This is the same as the number of ReaderRead executions that have
+succeeded.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ReaderNumWorkUnitsCompleted.pbtxt b/tensorflow/core/api_def/base_api/api_def_ReaderNumWorkUnitsCompleted.pbtxt
new file mode 100644
index 0000000000..ba9143534d
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ReaderNumWorkUnitsCompleted.pbtxt
@@ -0,0 +1,11 @@
+op {
+ graph_op_name: "ReaderNumWorkUnitsCompleted"
+ visibility: SKIP
+ in_arg {
+ name: "reader_handle"
+ description: <<END
+Handle to a Reader.
+END
+ }
+ summary: "Returns the number of work units this Reader has finished processing."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ReaderNumWorkUnitsCompletedV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_ReaderNumWorkUnitsCompletedV2.pbtxt
new file mode 100644
index 0000000000..5289c84240
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ReaderNumWorkUnitsCompletedV2.pbtxt
@@ -0,0 +1,13 @@
+op {
+ graph_op_name: "ReaderNumWorkUnitsCompletedV2"
+ endpoint {
+ name: "ReaderNumWorkUnitsCompleted"
+ }
+ in_arg {
+ name: "reader_handle"
+ description: <<END
+Handle to a Reader.
+END
+ }
+ summary: "Returns the number of work units this Reader has finished processing."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ReaderRead.pbtxt b/tensorflow/core/api_def/base_api/api_def_ReaderRead.pbtxt
new file mode 100644
index 0000000000..624b1c7fad
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ReaderRead.pbtxt
@@ -0,0 +1,34 @@
+op {
+ graph_op_name: "ReaderRead"
+ visibility: SKIP
+ in_arg {
+ name: "reader_handle"
+ description: <<END
+Handle to a Reader.
+END
+ }
+ in_arg {
+ name: "queue_handle"
+ description: <<END
+Handle to a Queue, with string work items.
+END
+ }
+ out_arg {
+ name: "key"
+ description: <<END
+A scalar.
+END
+ }
+ out_arg {
+ name: "value"
+ description: <<END
+A scalar.
+END
+ }
+ summary: "Returns the next record (key, value pair) produced by a Reader."
+ description: <<END
+Will dequeue from the input queue if necessary (e.g. when the
+Reader needs to start reading from a new file since it has finished
+with the previous file).
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ReaderReadUpTo.pbtxt b/tensorflow/core/api_def/base_api/api_def_ReaderReadUpTo.pbtxt
new file mode 100644
index 0000000000..53e6e44838
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ReaderReadUpTo.pbtxt
@@ -0,0 +1,41 @@
+op {
+ graph_op_name: "ReaderReadUpTo"
+ visibility: SKIP
+ in_arg {
+ name: "reader_handle"
+ description: <<END
+Handle to a `Reader`.
+END
+ }
+ in_arg {
+ name: "queue_handle"
+ description: <<END
+Handle to a `Queue`, with string work items.
+END
+ }
+ in_arg {
+ name: "num_records"
+ description: <<END
+number of records to read from `Reader`.
+END
+ }
+ out_arg {
+ name: "keys"
+ description: <<END
+A 1-D tensor.
+END
+ }
+ out_arg {
+ name: "values"
+ description: <<END
+A 1-D tensor.
+END
+ }
+ summary: "Returns up to `num_records` (key, value) pairs produced by a Reader."
+ description: <<END
+Will dequeue from the input queue if necessary (e.g. when the
+Reader needs to start reading from a new file since it has finished
+with the previous file).
+It may return fewer than `num_records` even before the last batch.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ReaderReadUpToV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_ReaderReadUpToV2.pbtxt
new file mode 100644
index 0000000000..c1d2206ffe
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ReaderReadUpToV2.pbtxt
@@ -0,0 +1,43 @@
+op {
+ graph_op_name: "ReaderReadUpToV2"
+ endpoint {
+ name: "ReaderReadUpTo"
+ }
+ in_arg {
+ name: "reader_handle"
+ description: <<END
+Handle to a `Reader`.
+END
+ }
+ in_arg {
+ name: "queue_handle"
+ description: <<END
+Handle to a `Queue`, with string work items.
+END
+ }
+ in_arg {
+ name: "num_records"
+ description: <<END
+number of records to read from `Reader`.
+END
+ }
+ out_arg {
+ name: "keys"
+ description: <<END
+A 1-D tensor.
+END
+ }
+ out_arg {
+ name: "values"
+ description: <<END
+A 1-D tensor.
+END
+ }
+ summary: "Returns up to `num_records` (key, value) pairs produced by a Reader."
+ description: <<END
+Will dequeue from the input queue if necessary (e.g. when the
+Reader needs to start reading from a new file since it has finished
+with the previous file).
+It may return fewer than `num_records` even before the last batch.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ReaderReadV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_ReaderReadV2.pbtxt
new file mode 100644
index 0000000000..6a6c4efdf5
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ReaderReadV2.pbtxt
@@ -0,0 +1,36 @@
+op {
+ graph_op_name: "ReaderReadV2"
+ endpoint {
+ name: "ReaderRead"
+ }
+ in_arg {
+ name: "reader_handle"
+ description: <<END
+Handle to a Reader.
+END
+ }
+ in_arg {
+ name: "queue_handle"
+ description: <<END
+Handle to a Queue, with string work items.
+END
+ }
+ out_arg {
+ name: "key"
+ description: <<END
+A scalar.
+END
+ }
+ out_arg {
+ name: "value"
+ description: <<END
+A scalar.
+END
+ }
+ summary: "Returns the next record (key, value pair) produced by a Reader."
+ description: <<END
+Will dequeue from the input queue if necessary (e.g. when the
+Reader needs to start reading from a new file since it has finished
+with the previous file).
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ReaderReset.pbtxt b/tensorflow/core/api_def/base_api/api_def_ReaderReset.pbtxt
new file mode 100644
index 0000000000..1ddb494293
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ReaderReset.pbtxt
@@ -0,0 +1,11 @@
+op {
+ graph_op_name: "ReaderReset"
+ visibility: SKIP
+ in_arg {
+ name: "reader_handle"
+ description: <<END
+Handle to a Reader.
+END
+ }
+ summary: "Restore a Reader to its initial clean state."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ReaderResetV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_ReaderResetV2.pbtxt
new file mode 100644
index 0000000000..6ac5b77d27
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ReaderResetV2.pbtxt
@@ -0,0 +1,13 @@
+op {
+ graph_op_name: "ReaderResetV2"
+ endpoint {
+ name: "ReaderReset"
+ }
+ in_arg {
+ name: "reader_handle"
+ description: <<END
+Handle to a Reader.
+END
+ }
+ summary: "Restore a Reader to its initial clean state."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ReaderRestoreState.pbtxt b/tensorflow/core/api_def/base_api/api_def_ReaderRestoreState.pbtxt
new file mode 100644
index 0000000000..05084ba367
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ReaderRestoreState.pbtxt
@@ -0,0 +1,22 @@
+op {
+ graph_op_name: "ReaderRestoreState"
+ visibility: SKIP
+ in_arg {
+ name: "reader_handle"
+ description: <<END
+Handle to a Reader.
+END
+ }
+ in_arg {
+ name: "state"
+ description: <<END
+Result of a ReaderSerializeState of a Reader with type
+matching reader_handle.
+END
+ }
+ summary: "Restore a reader to a previously saved state."
+ description: <<END
+Not all Readers support being restored, so this can produce an
+Unimplemented error.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ReaderRestoreStateV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_ReaderRestoreStateV2.pbtxt
new file mode 100644
index 0000000000..35e053d0ea
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ReaderRestoreStateV2.pbtxt
@@ -0,0 +1,24 @@
+op {
+ graph_op_name: "ReaderRestoreStateV2"
+ endpoint {
+ name: "ReaderRestoreState"
+ }
+ in_arg {
+ name: "reader_handle"
+ description: <<END
+Handle to a Reader.
+END
+ }
+ in_arg {
+ name: "state"
+ description: <<END
+Result of a ReaderSerializeState of a Reader with type
+matching reader_handle.
+END
+ }
+ summary: "Restore a reader to a previously saved state."
+ description: <<END
+Not all Readers support being restored, so this can produce an
+Unimplemented error.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ReaderSerializeState.pbtxt b/tensorflow/core/api_def/base_api/api_def_ReaderSerializeState.pbtxt
new file mode 100644
index 0000000000..401c22abd0
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ReaderSerializeState.pbtxt
@@ -0,0 +1,15 @@
+op {
+ graph_op_name: "ReaderSerializeState"
+ visibility: SKIP
+ in_arg {
+ name: "reader_handle"
+ description: <<END
+Handle to a Reader.
+END
+ }
+ summary: "Produce a string tensor that encodes the state of a Reader."
+ description: <<END
+Not all Readers support being serialized, so this can produce an
+Unimplemented error.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ReaderSerializeStateV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_ReaderSerializeStateV2.pbtxt
new file mode 100644
index 0000000000..855ba3c2ee
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ReaderSerializeStateV2.pbtxt
@@ -0,0 +1,17 @@
+op {
+ graph_op_name: "ReaderSerializeStateV2"
+ endpoint {
+ name: "ReaderSerializeState"
+ }
+ in_arg {
+ name: "reader_handle"
+ description: <<END
+Handle to a Reader.
+END
+ }
+ summary: "Produce a string tensor that encodes the state of a Reader."
+ description: <<END
+Not all Readers support being serialized, so this can produce an
+Unimplemented error.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Real.pbtxt b/tensorflow/core/api_def/base_api/api_def_Real.pbtxt
new file mode 100644
index 0000000000..225d45fd70
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Real.pbtxt
@@ -0,0 +1,17 @@
+op {
+ graph_op_name: "Real"
+ summary: "Returns the real part of a complex number."
+ description: <<END
+Given a tensor `input` of complex numbers, this operation returns a tensor of
+type `float` that is the real part of each element in `input`. All elements in
+`input` must be complex numbers of the form \\(a + bj\\), where *a* is the real
+part returned by this operation and *b* is the imaginary part.
+
+For example:
+
+```
+# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
+tf.real(input) ==> [-2.25, 3.25]
+```
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_RealDiv.pbtxt b/tensorflow/core/api_def/base_api/api_def_RealDiv.pbtxt
new file mode 100644
index 0000000000..da0e55b08f
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_RealDiv.pbtxt
@@ -0,0 +1,10 @@
+op {
+ graph_op_name: "RealDiv"
+ summary: "Returns x / y element-wise for real types."
+ description: <<END
+If `x` and `y` are reals, this will return the floating-point division.
+
+*NOTE*: `Div` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Reciprocal.pbtxt b/tensorflow/core/api_def/base_api/api_def_Reciprocal.pbtxt
new file mode 100644
index 0000000000..c66b84e268
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Reciprocal.pbtxt
@@ -0,0 +1,7 @@
+op {
+ graph_op_name: "Reciprocal"
+ summary: "Computes the reciprocal of x element-wise."
+ description: <<END
+I.e., \\(y = 1 / x\\).
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ReciprocalGrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_ReciprocalGrad.pbtxt
new file mode 100644
index 0000000000..583e5ecee1
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ReciprocalGrad.pbtxt
@@ -0,0 +1,9 @@
+op {
+ graph_op_name: "ReciprocalGrad"
+ visibility: HIDDEN
+ summary: "Computes the gradient for the inverse of `x` wrt its input."
+ description: <<END
+Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy`
+is the corresponding input gradient.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_RecordInput.pbtxt b/tensorflow/core/api_def/base_api/api_def_RecordInput.pbtxt
new file mode 100644
index 0000000000..7efc8cd833
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_RecordInput.pbtxt
@@ -0,0 +1,47 @@
+op {
+ graph_op_name: "RecordInput"
+ out_arg {
+ name: "records"
+ description: <<END
+A tensor of shape [batch_size].
+END
+ }
+ attr {
+ name: "file_pattern"
+ description: <<END
+Glob pattern for the data files.
+END
+ }
+ attr {
+ name: "file_random_seed"
+ description: <<END
+Random seeds used to produce randomized records.
+END
+ }
+ attr {
+ name: "file_shuffle_shift_ratio"
+ description: <<END
+Shifts the list of files after the list is randomly
+shuffled.
+END
+ }
+ attr {
+ name: "file_buffer_size"
+ description: <<END
+The size of the randomization shuffling buffer.
+END
+ }
+ attr {
+ name: "file_parallelism"
+ description: <<END
+How many sstables are opened and concurrently iterated over.
+END
+ }
+ attr {
+ name: "batch_size"
+ description: <<END
+The batch size.
+END
+ }
+ summary: "Emits randomized records."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ReduceJoin.pbtxt b/tensorflow/core/api_def/base_api/api_def_ReduceJoin.pbtxt
new file mode 100644
index 0000000000..ca7e0d3bee
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ReduceJoin.pbtxt
@@ -0,0 +1,59 @@
+op {
+ graph_op_name: "ReduceJoin"
+ in_arg {
+ name: "inputs"
+ description: <<END
+The input to be joined. All reduced indices must have non-zero size.
+END
+ }
+ in_arg {
+ name: "reduction_indices"
+ description: <<END
+The dimensions to reduce over. Dimensions are reduced in the
+order specified. Omitting `reduction_indices` is equivalent to passing
+`[n-1, n-2, ..., 0]`. Negative indices from `-n` to `-1` are supported.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Has shape equal to that of the input with reduced dimensions removed or
+set to `1` depending on `keep_dims`.
+END
+ }
+ attr {
+ name: "keep_dims"
+ description: <<END
+If `True`, retain reduced dimensions with length `1`.
+END
+ }
+ attr {
+ name: "separator"
+ description: <<END
+The separator to use when joining.
+END
+ }
+ summary: "Joins a string Tensor across the given dimensions."
+ description: <<END
+Computes the string join across dimensions in the given string Tensor of shape
+`[d_0, d_1, ..., d_n-1]`. Returns a new Tensor created by joining the input
+strings with the given separator (default: empty string). Negative indices are
+counted backwards from the end, with `-1` being equivalent to `n - 1`.
+
+For example:
+
+```python
+# tensor `a` is [["a", "b"], ["c", "d"]]
+tf.reduce_join(a, 0) ==> ["ac", "bd"]
+tf.reduce_join(a, 1) ==> ["ab", "cd"]
+tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> ["ac", "bd"]
+tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> ["ab", "cd"]
+tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]]
+tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], ["cd"]]
+tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"]
+tf.reduce_join(a, [0, 1]) ==> ["acbd"]
+tf.reduce_join(a, [1, 0]) ==> ["abcd"]
+tf.reduce_join(a, []) ==> ["abcd"]
+```
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_RefEnter.pbtxt b/tensorflow/core/api_def/base_api/api_def_RefEnter.pbtxt
new file mode 100644
index 0000000000..092f285b27
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_RefEnter.pbtxt
@@ -0,0 +1,41 @@
+op {
+ graph_op_name: "RefEnter"
+ visibility: HIDDEN
+ in_arg {
+ name: "data"
+ description: <<END
+The tensor to be made available to the child frame.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+The same tensor as `data`.
+END
+ }
+ attr {
+ name: "frame_name"
+ description: <<END
+The name of the child frame.
+END
+ }
+ attr {
+ name: "is_constant"
+ description: <<END
+If true, the output is constant within the child frame.
+END
+ }
+ attr {
+ name: "parallel_iterations"
+ description: <<END
+The number of iterations allowed to run in parallel.
+END
+ }
+ summary: "Creates or finds a child frame, and makes `data` available to the child frame."
+ description: <<END
+The unique `frame_name` is used by the `Executor` to identify frames. If
+`is_constant` is true, `output` is a constant in the child frame; otherwise
+it may be changed in the child frame. At most `parallel_iterations` iterations
+are run in parallel in the child frame.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_RefExit.pbtxt b/tensorflow/core/api_def/base_api/api_def_RefExit.pbtxt
new file mode 100644
index 0000000000..6d3083d6d9
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_RefExit.pbtxt
@@ -0,0 +1,20 @@
+op {
+ graph_op_name: "RefExit"
+ visibility: HIDDEN
+ in_arg {
+ name: "data"
+ description: <<END
+The tensor to be made available to the parent frame.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+The same tensor as `data`.
+END
+ }
+ summary: "Exits the current frame to its parent frame."
+ description: <<END
+Exit makes its input `data` available to the parent frame.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_RefIdentity.pbtxt b/tensorflow/core/api_def/base_api/api_def_RefIdentity.pbtxt
new file mode 100644
index 0000000000..b29606837e
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_RefIdentity.pbtxt
@@ -0,0 +1,5 @@
+op {
+ graph_op_name: "RefIdentity"
+ visibility: HIDDEN
+ summary: "Return the same ref tensor as the input ref tensor."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_RefMerge.pbtxt b/tensorflow/core/api_def/base_api/api_def_RefMerge.pbtxt
new file mode 100644
index 0000000000..cc7ad303c5
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_RefMerge.pbtxt
@@ -0,0 +1,30 @@
+op {
+ graph_op_name: "RefMerge"
+ visibility: HIDDEN
+ in_arg {
+ name: "inputs"
+ description: <<END
+The input tensors, exactly one of which will become available.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Will be set to the available input tensor.
+END
+ }
+ out_arg {
+ name: "value_index"
+ description: <<END
+The index of the chosen input tensor in `inputs`.
+END
+ }
+ summary: "Forwards the value of an available tensor from `inputs` to `output`."
+ description: <<END
+`Merge` waits for at least one of the tensors in `inputs` to become available.
+It is usually combined with `Switch` to implement branching.
+
+`Merge` forwards the first tensor to become available to `output`, and sets
+`value_index` to its index in `inputs`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_RefNextIteration.pbtxt b/tensorflow/core/api_def/base_api/api_def_RefNextIteration.pbtxt
new file mode 100644
index 0000000000..fd126e99b8
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_RefNextIteration.pbtxt
@@ -0,0 +1,16 @@
+op {
+ graph_op_name: "RefNextIteration"
+ in_arg {
+ name: "data"
+ description: <<END
+The tensor to be made available to the next iteration.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+The same tensor as `data`.
+END
+ }
+ summary: "Makes its input available to the next iteration."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_RefSelect.pbtxt b/tensorflow/core/api_def/base_api/api_def_RefSelect.pbtxt
new file mode 100644
index 0000000000..24a0c4684e
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_RefSelect.pbtxt
@@ -0,0 +1,22 @@
+op {
+ graph_op_name: "RefSelect"
+ in_arg {
+ name: "index"
+ description: <<END
+A scalar that determines the input that gets selected.
+END
+ }
+ in_arg {
+ name: "inputs"
+ description: <<END
+A list of ref tensors, one of which will be forwarded to `output`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+The forwarded tensor.
+END
+ }
+ summary: "Forwards the `index`th element of `inputs` to `output`."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_RefSwitch.pbtxt b/tensorflow/core/api_def/base_api/api_def_RefSwitch.pbtxt
new file mode 100644
index 0000000000..11db13a17e
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_RefSwitch.pbtxt
@@ -0,0 +1,34 @@
+op {
+ graph_op_name: "RefSwitch"
+ in_arg {
+ name: "data"
+ description: <<END
+The ref tensor to be forwarded to the appropriate output.
+END
+ }
+ in_arg {
+ name: "pred"
+ description: <<END
+A scalar that specifies which output port will receive data.
+END
+ }
+ out_arg {
+ name: "output_false"
+ description: <<END
+If `pred` is false, data will be forwarded to this output.
+END
+ }
+ out_arg {
+ name: "output_true"
+ description: <<END
+If `pred` is true, data will be forwarded to this output.
+END
+ }
+ summary: "Forwards the ref tensor `data` to the output port determined by `pred`."
+ description: <<END
+If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise,
+the data goes to `output_false`.
+
+See also `Switch` and `Merge`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Relu.pbtxt b/tensorflow/core/api_def/base_api/api_def_Relu.pbtxt
new file mode 100644
index 0000000000..44f79b0e29
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Relu.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Relu"
+ summary: "Computes rectified linear: `max(features, 0)`."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Relu6.pbtxt b/tensorflow/core/api_def/base_api/api_def_Relu6.pbtxt
new file mode 100644
index 0000000000..13a737394c
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Relu6.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Relu6"
+ summary: "Computes rectified linear 6: `min(max(features, 0), 6)`."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Relu6Grad.pbtxt b/tensorflow/core/api_def/base_api/api_def_Relu6Grad.pbtxt
new file mode 100644
index 0000000000..fc81506f66
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Relu6Grad.pbtxt
@@ -0,0 +1,25 @@
+op {
+ graph_op_name: "Relu6Grad"
+ visibility: HIDDEN
+ in_arg {
+ name: "gradients"
+ description: <<END
+The backpropagated gradients to the corresponding Relu6 operation.
+END
+ }
+ in_arg {
+ name: "features"
+ description: <<END
+The features passed as input to the corresponding Relu6 operation, or
+its output; using either one produces the same result.
+END
+ }
+ out_arg {
+ name: "backprops"
+ description: <<END
+The gradients:
+`gradients * (features > 0) * (features < 6)`.
+END
+ }
+ summary: "Computes rectified linear 6 gradients for a Relu6 operation."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ReluGrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_ReluGrad.pbtxt
new file mode 100644
index 0000000000..94affbc3b7
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ReluGrad.pbtxt
@@ -0,0 +1,24 @@
+op {
+ graph_op_name: "ReluGrad"
+ visibility: HIDDEN
+ in_arg {
+ name: "gradients"
+ description: <<END
+The backpropagated gradients to the corresponding Relu operation.
+END
+ }
+ in_arg {
+ name: "features"
+ description: <<END
+The features passed as input to the corresponding Relu operation, or
+the outputs of that operation (both work equivalently).
+END
+ }
+ out_arg {
+ name: "backprops"
+ description: <<END
+`gradients * (features > 0)`.
+END
+ }
+ summary: "Computes rectified linear gradients for a Relu operation."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_RemoteCall.pbtxt b/tensorflow/core/api_def/base_api/api_def_RemoteCall.pbtxt
new file mode 100644
index 0000000000..1f75f32ebc
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_RemoteCall.pbtxt
@@ -0,0 +1,40 @@
+op {
+ graph_op_name: "RemoteCall"
+ in_arg {
+ name: "target"
+ description: <<END
+A fully specified device name where we want to run the function.
+END
+ }
+ in_arg {
+ name: "args"
+ description: <<END
+A list of arguments for the function.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+A list of return values.
+END
+ }
+ attr {
+ name: "Tin"
+ description: <<END
+The type list for the arguments.
+END
+ }
+ attr {
+ name: "Tout"
+ description: <<END
+The type list for the return values.
+END
+ }
+ attr {
+ name: "f"
+ description: <<END
+The function to run remotely.
+END
+ }
+ summary: "Runs function `f` on a remote device indicated by `target`."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_RemoteFusedGraphExecute.pbtxt b/tensorflow/core/api_def/base_api/api_def_RemoteFusedGraphExecute.pbtxt
new file mode 100644
index 0000000000..190df5ecbb
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_RemoteFusedGraphExecute.pbtxt
@@ -0,0 +1,32 @@
+op {
+ graph_op_name: "RemoteFusedGraphExecute"
+ in_arg {
+ name: "inputs"
+ description: <<END
+Arbitrary number of tensors with arbitrary data types
+END
+ }
+ out_arg {
+ name: "outputs"
+ description: <<END
+Arbitrary number of tensors with arbitrary data types
+END
+ }
+ attr {
+ name: "serialized_remote_fused_graph_execute_info"
+ description: <<END
+Serialized protocol buffer
+of RemoteFusedGraphExecuteInfo which contains graph specifications.
+END
+ }
+ summary: "Execute a sub graph on a remote processor."
+ description: <<END
+The graph specifications (such as the graph itself, input tensors, and output
+names) are stored as a serialized protocol buffer of RemoteFusedGraphExecuteInfo
+in the serialized_remote_fused_graph_execute_info attr.
+The specifications will be passed to a dedicated registered
+remote fused graph executor. The executor will send the graph specifications
+to a remote processor and execute that graph. The execution results
+will be passed to consumer nodes as outputs of this node.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_RepeatDataset.pbtxt b/tensorflow/core/api_def/base_api/api_def_RepeatDataset.pbtxt
new file mode 100644
index 0000000000..fc6169cd32
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_RepeatDataset.pbtxt
@@ -0,0 +1,11 @@
+op {
+ graph_op_name: "RepeatDataset"
+ in_arg {
+ name: "count"
+ description: <<END
+A scalar representing the number of times that `input_dataset` should
+be repeated. A value of `-1` indicates that it should be repeated infinitely.
+END
+ }
+ summary: "Creates a dataset that emits the outputs of `input_dataset` `count` times."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_RequantizationRange.pbtxt b/tensorflow/core/api_def/base_api/api_def_RequantizationRange.pbtxt
new file mode 100644
index 0000000000..07bbd4ac60
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_RequantizationRange.pbtxt
@@ -0,0 +1,39 @@
+op {
+ graph_op_name: "RequantizationRange"
+ in_arg {
+ name: "input_min"
+ description: <<END
+The float value that the minimum quantized input value represents.
+END
+ }
+ in_arg {
+ name: "input_max"
+ description: <<END
+The float value that the maximum quantized input value represents.
+END
+ }
+ out_arg {
+ name: "output_min"
+ description: <<END
+The computed min output.
+END
+ }
+ out_arg {
+ name: "output_max"
+ description: <<END
+The computed max output.
+END
+ }
+ attr {
+ name: "Tinput"
+ description: <<END
+The type of the input.
+END
+ }
+ summary: "Given a quantized tensor described by (input, input_min, input_max), outputs a"
+ description: <<END
+range that covers the actual values present in that tensor. This op is
+typically used to produce the requested_output_min and requested_output_max for
+Requantize.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Requantize.pbtxt b/tensorflow/core/api_def/base_api/api_def_Requantize.pbtxt
new file mode 100644
index 0000000000..1b03f63b26
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Requantize.pbtxt
@@ -0,0 +1,60 @@
+op {
+ graph_op_name: "Requantize"
+ in_arg {
+ name: "input_min"
+ description: <<END
+The float value that the minimum quantized input value represents.
+END
+ }
+ in_arg {
+ name: "input_max"
+ description: <<END
+The float value that the maximum quantized input value represents.
+END
+ }
+ in_arg {
+ name: "requested_output_min"
+ description: <<END
+The float value that the minimum quantized output value represents.
+END
+ }
+ in_arg {
+ name: "requested_output_max"
+ description: <<END
+The float value that the maximum quantized output value represents.
+END
+ }
+ out_arg {
+ name: "output_min"
+ description: <<END
+The requested_output_min value is copied into this output.
+END
+ }
+ out_arg {
+ name: "output_max"
+ description: <<END
+The requested_output_max value is copied into this output.
+END
+ }
+ attr {
+ name: "Tinput"
+ description: <<END
+The type of the input.
+END
+ }
+ attr {
+ name: "out_type"
+ description: <<END
+The type of the output. Should be a lower bit depth than Tinput.
+END
+ }
+ summary: "Convert the quantized \'input\' tensor into a lower-precision \'output\', using the"
+ description: <<END
+output range specified with 'requested_output_min' and 'requested_output_max'.
+
+[input_min, input_max] are scalar floats that specify the range for the float
+interpretation of the 'input' data. For example, if input_min is -1.0f and
+input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0
+value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.
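+
+A plain-Python sketch of that float interpretation (not the kernel's exact
+arithmetic):
+
+```python
+input_min, input_max = -1.0, 1.0
+
+def dequantize(q):  # q is an integer in [0, 65535] for quint16
+    return input_min + (q / 65535.0) * (input_max - input_min)
+
+print(dequantize(0))      # -1.0
+print(dequantize(65535))  # 1.0
+```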
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Reshape.pbtxt b/tensorflow/core/api_def/base_api/api_def_Reshape.pbtxt
new file mode 100644
index 0000000000..fa32b25374
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Reshape.pbtxt
@@ -0,0 +1,68 @@
+op {
+ graph_op_name: "Reshape"
+ in_arg {
+ name: "shape"
+ description: <<END
+Defines the shape of the output tensor.
+END
+ }
+ summary: "Reshapes a tensor."
+ description: <<END
+Given `tensor`, this operation returns a tensor that has the same values
+as `tensor` with shape `shape`.
+
+If one component of `shape` is the special value -1, the size of that dimension
+is computed so that the total size remains constant. In particular, a `shape`
+of `[-1]` flattens into 1-D. At most one component of `shape` can be -1.
+
+If `shape` is 1-D or higher, then the operation returns a tensor with shape
+`shape` filled with the values of `tensor`. In this case, the number of elements
+implied by `shape` must be the same as the number of elements in `tensor`.
+
+For example:
+
+```
+# tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9]
+# tensor 't' has shape [9]
+reshape(t, [3, 3]) ==> [[1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9]]
+
+# tensor 't' is [[[1, 1], [2, 2]],
+# [[3, 3], [4, 4]]]
+# tensor 't' has shape [2, 2, 2]
+reshape(t, [2, 4]) ==> [[1, 1, 2, 2],
+ [3, 3, 4, 4]]
+
+# tensor 't' is [[[1, 1, 1],
+# [2, 2, 2]],
+# [[3, 3, 3],
+# [4, 4, 4]],
+# [[5, 5, 5],
+# [6, 6, 6]]]
+# tensor 't' has shape [3, 2, 3]
+# pass '[-1]' to flatten 't'
+reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]
+
+# -1 can also be used to infer the shape
+
+# -1 is inferred to be 9:
+reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
+ [4, 4, 4, 5, 5, 5, 6, 6, 6]]
+# -1 is inferred to be 2:
+reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
+ [4, 4, 4, 5, 5, 5, 6, 6, 6]]
+# -1 is inferred to be 3:
+reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1],
+ [2, 2, 2],
+ [3, 3, 3]],
+ [[4, 4, 4],
+ [5, 5, 5],
+ [6, 6, 6]]]
+
+# tensor 't' is [7]
+# shape `[]` reshapes to a scalar
+reshape(t, []) ==> 7
+```
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ResizeArea.pbtxt b/tensorflow/core/api_def/base_api/api_def_ResizeArea.pbtxt
new file mode 100644
index 0000000000..6dc321a544
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ResizeArea.pbtxt
@@ -0,0 +1,40 @@
+op {
+ graph_op_name: "ResizeArea"
+ in_arg {
+ name: "images"
+ description: <<END
+4-D with shape `[batch, height, width, channels]`.
+END
+ }
+ in_arg {
+ name: "size"
+ description: <<END
+A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The
+new size for the images.
+END
+ }
+ out_arg {
+ name: "resized_images"
+ description: <<END
+4-D with shape
+`[batch, new_height, new_width, channels]`.
+END
+ }
+ attr {
+ name: "align_corners"
+ description: <<END
+If true, rescale input by (new_height - 1) / (height - 1), which
+exactly aligns the 4 corners of images and resized images. If false, rescale
+by new_height / height. Treat similarly the width dimension.
+END
+ }
+ summary: "Resize `images` to `size` using area interpolation."
+ description: <<END
+Input images can be of different types but output images are always float.
+
+Each output pixel is computed by first transforming the pixel's footprint into
+the input tensor and then averaging the pixels that intersect the footprint. An
+input pixel's contribution to the average is weighted by the fraction of its
+area that intersects the footprint. This is the same as OpenCV's INTER_AREA.
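+
+For example, a minimal sketch (assuming the Python endpoint
+`tf.image.resize_area`):
+
+```python
+import tensorflow as tf
+
+images = tf.zeros([1, 480, 640, 3])
+# Downsample 2x by averaging input pixels weighted by overlap area.
+small = tf.image.resize_area(images, size=[240, 320])  # float32 output
+```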
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ResizeBicubic.pbtxt b/tensorflow/core/api_def/base_api/api_def_ResizeBicubic.pbtxt
new file mode 100644
index 0000000000..06e645e3ee
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ResizeBicubic.pbtxt
@@ -0,0 +1,35 @@
+op {
+ graph_op_name: "ResizeBicubic"
+ in_arg {
+ name: "images"
+ description: <<END
+4-D with shape `[batch, height, width, channels]`.
+END
+ }
+ in_arg {
+ name: "size"
+ description: <<END
+A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The
+new size for the images.
+END
+ }
+ out_arg {
+ name: "resized_images"
+ description: <<END
+4-D with shape
+`[batch, new_height, new_width, channels]`.
+END
+ }
+ attr {
+ name: "align_corners"
+ description: <<END
+If true, rescale input by (new_height - 1) / (height - 1), which
+exactly aligns the 4 corners of images and resized images. If false, rescale
+by new_height / height. Treat similarly the width dimension.
+END
+ }
+ summary: "Resize `images` to `size` using bicubic interpolation."
+ description: <<END
+Input images can be of different types but output images are always float.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ResizeBicubicGrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_ResizeBicubicGrad.pbtxt
new file mode 100644
index 0000000000..bf5201d82e
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ResizeBicubicGrad.pbtxt
@@ -0,0 +1,34 @@
+op {
+ graph_op_name: "ResizeBicubicGrad"
+ visibility: HIDDEN
+ in_arg {
+ name: "grads"
+ description: <<END
+4-D with shape `[batch, height, width, channels]`.
+END
+ }
+ in_arg {
+ name: "original_image"
+ description: <<END
+4-D with shape `[batch, orig_height, orig_width, channels]`,
+The image tensor that was resized.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+4-D with shape `[batch, orig_height, orig_width, channels]`.
+Gradients with respect to the input image. Input image must have been
+float or double.
+END
+ }
+ attr {
+ name: "align_corners"
+ description: <<END
+If true, rescale grads by (orig_height - 1) / (height - 1), which
+exactly aligns the 4 corners of grads and original_image. If false, rescale by
+orig_height / height. Treat similarly the width dimension.
+END
+ }
+ summary: "Computes the gradient of bicubic interpolation."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ResizeBilinear.pbtxt b/tensorflow/core/api_def/base_api/api_def_ResizeBilinear.pbtxt
new file mode 100644
index 0000000000..0768e437fa
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ResizeBilinear.pbtxt
@@ -0,0 +1,35 @@
+op {
+ graph_op_name: "ResizeBilinear"
+ in_arg {
+ name: "images"
+ description: <<END
+4-D with shape `[batch, height, width, channels]`.
+END
+ }
+ in_arg {
+ name: "size"
+ description: <<END
+A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The
+new size for the images.
+END
+ }
+ out_arg {
+ name: "resized_images"
+ description: <<END
+4-D with shape
+`[batch, new_height, new_width, channels]`.
+END
+ }
+ attr {
+ name: "align_corners"
+ description: <<END
+If true, rescale input by (new_height - 1) / (height - 1), which
+exactly aligns the 4 corners of images and resized images. If false, rescale
+by new_height / height. Treat similarly the width dimension.
+END
+ }
+ summary: "Resize `images` to `size` using bilinear interpolation."
+ description: <<END
+Input images can be of different types but output images are always float.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ResizeBilinearGrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_ResizeBilinearGrad.pbtxt
new file mode 100644
index 0000000000..fba64203c2
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ResizeBilinearGrad.pbtxt
@@ -0,0 +1,34 @@
+op {
+ graph_op_name: "ResizeBilinearGrad"
+ visibility: HIDDEN
+ in_arg {
+ name: "grads"
+ description: <<END
+4-D with shape `[batch, height, width, channels]`.
+END
+ }
+ in_arg {
+ name: "original_image"
+ description: <<END
+4-D with shape `[batch, orig_height, orig_width, channels]`,
+The image tensor that was resized.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+4-D with shape `[batch, orig_height, orig_width, channels]`.
+Gradients with respect to the input image. Input image must have been
+float or double.
+END
+ }
+ attr {
+ name: "align_corners"
+ description: <<END
+If true, rescale grads by (orig_height - 1) / (height - 1), which
+exactly aligns the 4 corners of grads and original_image. If false, rescale by
+orig_height / height. Treat similarly the width dimension.
+END
+ }
+ summary: "Computes the gradient of bilinear interpolation."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ResizeNearestNeighbor.pbtxt b/tensorflow/core/api_def/base_api/api_def_ResizeNearestNeighbor.pbtxt
new file mode 100644
index 0000000000..a74db4c9dc
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ResizeNearestNeighbor.pbtxt
@@ -0,0 +1,32 @@
+op {
+ graph_op_name: "ResizeNearestNeighbor"
+ in_arg {
+ name: "images"
+ description: <<END
+4-D with shape `[batch, height, width, channels]`.
+END
+ }
+ in_arg {
+ name: "size"
+ description: <<END
+A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The
+new size for the images.
+END
+ }
+ out_arg {
+ name: "resized_images"
+ description: <<END
+4-D with shape
+`[batch, new_height, new_width, channels]`.
+END
+ }
+ attr {
+ name: "align_corners"
+ description: <<END
+If true, rescale input by (new_height - 1) / (height - 1), which
+exactly aligns the 4 corners of images and resized images. If false, rescale
+by new_height / height. Treat similarly the width dimension.
+END
+ }
+ summary: "Resize `images` to `size` using nearest neighbor interpolation."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ResizeNearestNeighborGrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_ResizeNearestNeighborGrad.pbtxt
new file mode 100644
index 0000000000..4ef1547eb4
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ResizeNearestNeighborGrad.pbtxt
@@ -0,0 +1,33 @@
+op {
+ graph_op_name: "ResizeNearestNeighborGrad"
+ visibility: HIDDEN
+ in_arg {
+ name: "grads"
+ description: <<END
+4-D with shape `[batch, height, width, channels]`.
+END
+ }
+ in_arg {
+ name: "size"
+ description: <<END
+A 1-D int32 Tensor of 2 elements: `orig_height, orig_width`. The
+original input size.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients
+with respect to the input image.
+END
+ }
+ attr {
+ name: "align_corners"
+ description: <<END
+If true, rescale grads by (orig_height - 1) / (height - 1), which
+exactly aligns the 4 corners of grads and original_image. If false, rescale by
+orig_height / height. Treat similarly the width dimension.
+END
+ }
+ summary: "Computes the gradient of nearest neighbor interpolation."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ResourceApplyAdadelta.pbtxt b/tensorflow/core/api_def/base_api/api_def_ResourceApplyAdadelta.pbtxt
new file mode 100644
index 0000000000..f2708a8348
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ResourceApplyAdadelta.pbtxt
@@ -0,0 +1,59 @@
+op {
+ graph_op_name: "ResourceApplyAdadelta"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "accum"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "accum_update"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "lr"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "rho"
+ description: <<END
+Decay factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "epsilon"
+ description: <<END
+Constant factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+The gradient.
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If True, updating of the var, accum and update_accum tensors will be protected by
+a lock; otherwise the behavior is undefined, but may exhibit less contention.
+END
+ }
+ summary: "Update \'*var\' according to the adadelta scheme."
+ description: <<END
+accum = rho * accum + (1 - rho) * grad.square();
+update = (update_accum + epsilon).sqrt() * (accum + epsilon).rsqrt() * grad;
+update_accum = rho * update_accum + (1 - rho) * update.square();
+var -= update;
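+
+A plain-NumPy sketch of this update (assuming the step is scaled by the op's
+`lr` input, which the equations above leave implicit):
+
+```python
+import numpy as np
+
+def adadelta_step(var, accum, accum_update, lr, rho, eps, grad):
+    accum = rho * accum + (1 - rho) * grad ** 2
+    update = np.sqrt(accum_update + eps) / np.sqrt(accum + eps) * grad
+    accum_update = rho * accum_update + (1 - rho) * update ** 2
+    var = var - lr * update  # lr scales the step (assumed, see above)
+    return var, accum, accum_update
+```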
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ResourceApplyAdagrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_ResourceApplyAdagrad.pbtxt
new file mode 100644
index 0000000000..5982d4d371
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ResourceApplyAdagrad.pbtxt
@@ -0,0 +1,40 @@
+op {
+ graph_op_name: "ResourceApplyAdagrad"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "accum"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "lr"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+The gradient.
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If `True`, updating of the var and accum tensors will be protected
+by a lock; otherwise the behavior is undefined, but may exhibit less
+contention.
+END
+ }
+ summary: "Update \'*var\' according to the adagrad scheme."
+ description: <<END
+accum += grad * grad
+var -= lr * grad * (1 / sqrt(accum))
+END
+}
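A minimal NumPy sketch of the Adagrad update documented above (names and the default `lr` are illustrative):

```python
import numpy as np

def adagrad_step(var, accum, grad, lr=0.01):
    accum = accum + np.square(grad)          # accum += grad * grad
    var = var - lr * grad / np.sqrt(accum)   # var -= lr * grad * (1 / sqrt(accum))
    return var, accum
```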
diff --git a/tensorflow/core/api_def/base_api/api_def_ResourceApplyAdagradDA.pbtxt b/tensorflow/core/api_def/base_api/api_def_ResourceApplyAdagradDA.pbtxt
new file mode 100644
index 0000000000..254e0c609a
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ResourceApplyAdagradDA.pbtxt
@@ -0,0 +1,59 @@
+op {
+ graph_op_name: "ResourceApplyAdagradDA"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "gradient_accumulator"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "gradient_squared_accumulator"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+The gradient.
+END
+ }
+ in_arg {
+ name: "lr"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "l1"
+ description: <<END
+L1 regularization. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "l2"
+ description: <<END
+L2 regularization. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "global_step"
+ description: <<END
+Training step number. Must be a scalar.
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If True, updating of the var and accum tensors will be protected by
+a lock; otherwise the behavior is undefined, but may exhibit less contention.
+END
+ }
+ summary: "Update \'*var\' according to the proximal adagrad scheme."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ResourceApplyAdam.pbtxt b/tensorflow/core/api_def/base_api/api_def_ResourceApplyAdam.pbtxt
new file mode 100644
index 0000000000..bea1fd6762
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ResourceApplyAdam.pbtxt
@@ -0,0 +1,84 @@
+op {
+ graph_op_name: "ResourceApplyAdam"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "m"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "v"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "beta1_power"
+ description: <<END
+Must be a scalar.
+END
+ }
+ in_arg {
+ name: "beta2_power"
+ description: <<END
+Must be a scalar.
+END
+ }
+ in_arg {
+ name: "lr"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "beta1"
+ description: <<END
+Momentum factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "beta2"
+ description: <<END
+Momentum factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "epsilon"
+ description: <<END
+Ridge term. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+The gradient.
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If `True`, updating of the var, m, and v tensors will be protected
+by a lock; otherwise the behavior is undefined, but may exhibit less
+contention.
+END
+ }
+ attr {
+ name: "use_nesterov"
+ description: <<END
+If `True`, uses the Nesterov update.
+END
+ }
+ summary: "Update \'*var\' according to the Adam algorithm."
+ description: <<END
+lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t)
+m_t <- beta1 * m_{t-1} + (1 - beta1) * g_t
+v_t <- beta2 * v_{t-1} + (1 - beta2) * g_t * g_t
+variable <- variable - lr_t * m_t / (sqrt(v_t) + epsilon)
+END
+}
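A minimal NumPy sketch of the Adam step above, with `t` as the 1-based step count used for the bias-corrected learning rate (function name and defaults are illustrative):

```python
import numpy as np

def adam_step(var, m, v, grad, t, lr=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8):
    lr_t = lr * np.sqrt(1 - beta2**t) / (1 - beta1**t)   # bias correction
    m = beta1 * m + (1 - beta1) * grad                   # first moment
    v = beta2 * v + (1 - beta2) * np.square(grad)        # second moment
    var = var - lr_t * m / (np.sqrt(v) + epsilon)
    return var, m, v
```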
diff --git a/tensorflow/core/api_def/base_api/api_def_ResourceApplyCenteredRMSProp.pbtxt b/tensorflow/core/api_def/base_api/api_def_ResourceApplyCenteredRMSProp.pbtxt
new file mode 100644
index 0000000000..9cc033cc89
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ResourceApplyCenteredRMSProp.pbtxt
@@ -0,0 +1,80 @@
+op {
+ graph_op_name: "ResourceApplyCenteredRMSProp"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "mg"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "ms"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "mom"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "lr"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "rho"
+ description: <<END
+Decay rate. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "epsilon"
+ description: <<END
+Ridge term. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+The gradient.
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If `True`, updating of the var, mg, ms, and mom tensors is
+protected by a lock; otherwise the behavior is undefined, but may exhibit less
+contention.
+END
+ }
+ summary: "Update \'*var\' according to the centered RMSProp algorithm."
+ description: <<END
+The centered RMSProp algorithm uses an estimate of the centered second moment
+(i.e., the variance) for normalization, as opposed to regular RMSProp, which
+uses the (uncentered) second moment. This often helps with training, but is
+slightly more expensive in terms of computation and memory.
+
+Note that in dense implementation of this algorithm, mg, ms, and mom will
+update even if the grad is zero, but in this sparse implementation, mg, ms,
+and mom will not update in iterations during which the grad is zero.
+
+mean_square = decay * mean_square + (1-decay) * gradient ** 2
+mean_grad = decay * mean_grad + (1-decay) * gradient
+
+Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
+
+mg <- rho * mg_{t-1} + (1-rho) * grad
+ms <- rho * ms_{t-1} + (1-rho) * grad * grad
+mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon)
+var <- var - mom
+END
+}
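A minimal NumPy sketch of the dense centered-RMSProp step above; `momentum` appears in the update formulas, and all hyperparameter defaults here are illustrative:

```python
import numpy as np

def centered_rmsprop_step(var, mg, ms, mom, grad,
                          lr=0.001, rho=0.9, momentum=0.9, epsilon=1e-10):
    mg = rho * mg + (1 - rho) * grad             # running mean of gradients
    ms = rho * ms + (1 - rho) * np.square(grad)  # running mean of squared gradients
    mom = momentum * mom + lr * grad / np.sqrt(ms - np.square(mg) + epsilon)
    var = var - mom
    return var, mg, ms, mom
```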
diff --git a/tensorflow/core/api_def/base_api/api_def_ResourceApplyFtrl.pbtxt b/tensorflow/core/api_def/base_api/api_def_ResourceApplyFtrl.pbtxt
new file mode 100644
index 0000000000..a6a29b164e
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ResourceApplyFtrl.pbtxt
@@ -0,0 +1,67 @@
+op {
+ graph_op_name: "ResourceApplyFtrl"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "accum"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "linear"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+The gradient.
+END
+ }
+ in_arg {
+ name: "lr"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "l1"
+ description: <<END
+L1 regularization. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "l2"
+ description: <<END
+L2 regularization. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "lr_power"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If `True`, updating of the var and accum tensors will be protected
+by a lock; otherwise the behavior is undefined, but may exhibit less
+contention.
+END
+ }
+ summary: "Update \'*var\' according to the Ftrl-proximal scheme."
+ description: <<END
+accum_new = accum + grad * grad
+linear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
+quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
+var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
+accum = accum_new
+END
+}
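A minimal NumPy sketch of the FTRL-proximal step above (defaults are illustrative; `lr_power = -0.5` gives the common square-root learning-rate schedule):

```python
import numpy as np

def ftrl_step(var, accum, linear, grad, lr=0.1, l1=0.0, l2=0.0, lr_power=-0.5):
    accum_new = accum + np.square(grad)
    # sigma = (accum_new^(-lr_power) - accum^(-lr_power)) / lr
    sigma = (accum_new**(-lr_power) - accum**(-lr_power)) / lr
    linear = linear + grad - sigma * var
    quadratic = accum_new**(-lr_power) / lr + 2 * l2
    var = np.where(np.abs(linear) > l1,
                   (np.sign(linear) * l1 - linear) / quadratic,
                   0.0)
    return var, accum_new, linear
```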
diff --git a/tensorflow/core/api_def/base_api/api_def_ResourceApplyFtrlV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_ResourceApplyFtrlV2.pbtxt
new file mode 100644
index 0000000000..a71c835b78
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ResourceApplyFtrlV2.pbtxt
@@ -0,0 +1,69 @@
+op {
+ graph_op_name: "ResourceApplyFtrlV2"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "accum"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "linear"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+The gradient.
+END
+ }
+ in_arg {
+ name: "lr"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "l1"
+ description: <<END
+L1 regularization. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "l2"
+ description: <<END
+L2 shrinkage regularization. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "lr_power"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If `True`, updating of the var and accum tensors will be protected
+by a lock; otherwise the behavior is undefined, but may exhibit less
+contention.
+END
+ }
+ summary: "Update \'*var\' according to the Ftrl-proximal scheme."
+ description: <<END
+grad_with_shrinkage = grad + 2 * l2_shrinkage * var
+accum_new = accum + grad_with_shrinkage * grad_with_shrinkage
+linear += grad_with_shrinkage -
+ (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
+quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
+var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
+accum = accum_new
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ResourceApplyGradientDescent.pbtxt b/tensorflow/core/api_def/base_api/api_def_ResourceApplyGradientDescent.pbtxt
new file mode 100644
index 0000000000..01f235f224
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ResourceApplyGradientDescent.pbtxt
@@ -0,0 +1,29 @@
+op {
+ graph_op_name: "ResourceApplyGradientDescent"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "alpha"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "delta"
+ description: <<END
+The change.
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If `True`, the subtraction will be protected by a lock;
+otherwise the behavior is undefined, but may exhibit less contention.
+END
+ }
+ summary: "Update \'*var\' by subtracting \'alpha\' * \'delta\' from it."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ResourceApplyMomentum.pbtxt b/tensorflow/core/api_def/base_api/api_def_ResourceApplyMomentum.pbtxt
new file mode 100644
index 0000000000..d1a84a4c34
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ResourceApplyMomentum.pbtxt
@@ -0,0 +1,56 @@
+op {
+ graph_op_name: "ResourceApplyMomentum"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "accum"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "lr"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+The gradient.
+END
+ }
+ in_arg {
+ name: "momentum"
+ description: <<END
+Momentum. Must be a scalar.
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If `True`, updating of the var and accum tensors will be protected
+by a lock; otherwise the behavior is undefined, but may exhibit less
+contention.
+END
+ }
+ attr {
+ name: "use_nesterov"
+ description: <<END
+If `True`, the tensor passed to compute grad will be
+var - lr * momentum * accum, so in the end, the var you get is actually
+var - lr * momentum * accum.
+END
+ }
+ summary: "Update \'*var\' according to the momentum scheme. Set use_nesterov = True if you"
+ description: <<END
+want to use Nesterov momentum.
+
+accum = accum * momentum + grad
+var -= lr * accum
+END
+}
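A minimal NumPy sketch of the momentum step above; the Nesterov branch follows the common formulation in which the gradient contribution is applied at the lookahead point, which is an assumption of this sketch rather than text taken from the op definition:

```python
import numpy as np

def momentum_step(var, accum, grad, lr=0.01, momentum=0.9, use_nesterov=False):
    accum = accum * momentum + grad
    if use_nesterov:
        var = var - lr * (grad + momentum * accum)   # lookahead update
    else:
        var = var - lr * accum
    return var, accum
```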
diff --git a/tensorflow/core/api_def/base_api/api_def_ResourceApplyProximalAdagrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_ResourceApplyProximalAdagrad.pbtxt
new file mode 100644
index 0000000000..1eaa86ea14
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ResourceApplyProximalAdagrad.pbtxt
@@ -0,0 +1,52 @@
+op {
+ graph_op_name: "ResourceApplyProximalAdagrad"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "accum"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "lr"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "l1"
+ description: <<END
+L1 regularization. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "l2"
+ description: <<END
+L2 regularization. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+The gradient.
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If True, updating of the var and accum tensors will be protected by
+a lock; otherwise the behavior is undefined, but may exhibit less contention.
+END
+ }
+ summary: "Update \'*var\' and \'*accum\' according to FOBOS with Adagrad learning rate."
+ description: <<END
+accum += grad * grad
+prox_v = var - lr * grad * (1 / sqrt(accum))
+var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
+END
+}
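A minimal NumPy sketch of the proximal-Adagrad (FOBOS) step above; the final line is the soft-thresholding operator written out with NumPy primitives:

```python
import numpy as np

def proximal_adagrad_step(var, accum, grad, lr=0.01, l1=0.0, l2=0.0):
    accum = accum + np.square(grad)
    prox_v = var - lr * grad / np.sqrt(accum)
    # shrinkage: sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1, 0}
    var = np.sign(prox_v) / (1 + lr * l2) * np.maximum(np.abs(prox_v) - lr * l1, 0.0)
    return var, accum
```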
diff --git a/tensorflow/core/api_def/base_api/api_def_ResourceApplyProximalGradientDescent.pbtxt b/tensorflow/core/api_def/base_api/api_def_ResourceApplyProximalGradientDescent.pbtxt
new file mode 100644
index 0000000000..c22e931a2b
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ResourceApplyProximalGradientDescent.pbtxt
@@ -0,0 +1,45 @@
+op {
+ graph_op_name: "ResourceApplyProximalGradientDescent"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "alpha"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "l1"
+ description: <<END
+L1 regularization. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "l2"
+ description: <<END
+L2 regularization. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "delta"
+ description: <<END
+The change.
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If True, the subtraction will be protected by a lock;
+otherwise the behavior is undefined, but may exhibit less contention.
+END
+ }
+ summary: "Update \'*var\' as FOBOS algorithm with fixed learning rate."
+ description: <<END
+prox_v = var - alpha * delta
+var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ResourceApplyRMSProp.pbtxt b/tensorflow/core/api_def/base_api/api_def_ResourceApplyRMSProp.pbtxt
new file mode 100644
index 0000000000..2a24f23f9c
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ResourceApplyRMSProp.pbtxt
@@ -0,0 +1,66 @@
+op {
+ graph_op_name: "ResourceApplyRMSProp"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "ms"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "mom"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "lr"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "rho"
+ description: <<END
+Decay rate. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "epsilon"
+ description: <<END
+Ridge term. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+The gradient.
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If `True`, updating of the var, ms, and mom tensors is protected
+by a lock; otherwise the behavior is undefined, but may exhibit less
+contention.
+END
+ }
+ summary: "Update \'*var\' according to the RMSProp algorithm."
+ description: <<END
+Note that in dense implementation of this algorithm, ms and mom will
+update even if the grad is zero, but in this sparse implementation, ms
+and mom will not update in iterations during which the grad is zero.
+
+mean_square = decay * mean_square + (1-decay) * gradient ** 2
+Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
+
+ms <- rho * ms_{t-1} + (1-rho) * grad * grad
+mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
+var <- var - mom
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ResourceCountUpTo.pbtxt b/tensorflow/core/api_def/base_api/api_def_ResourceCountUpTo.pbtxt
new file mode 100644
index 0000000000..bc70d79a1e
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ResourceCountUpTo.pbtxt
@@ -0,0 +1,24 @@
+op {
+ graph_op_name: "ResourceCountUpTo"
+ in_arg {
+ name: "resource"
+ description: <<END
+Should be from a scalar `Variable` node.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+A copy of the input before increment. If nothing else modifies the
+input, the values produced will all be distinct.
+END
+ }
+ attr {
+ name: "limit"
+ description: <<END
+If incrementing ref would bring it above limit, instead generates an
+'OutOfRange' error.
+END
+ }
+ summary: "Increments variable pointed to by \'resource\' until it reaches \'limit\'."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ResourceGather.pbtxt b/tensorflow/core/api_def/base_api/api_def_ResourceGather.pbtxt
new file mode 100644
index 0000000000..ae5d38a501
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ResourceGather.pbtxt
@@ -0,0 +1,19 @@
+op {
+ graph_op_name: "ResourceGather"
+ summary: "Gather slices from the variable pointed to by `resource` according to `indices`."
+ description: <<END
+`indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
+Produces an output tensor with shape `indices.shape + params.shape[1:]` where:
+
+```python
+ # Scalar indices
+ output[:, ..., :] = params[indices, :, ..., :]
+
+ # Vector indices
+ output[i, :, ..., :] = params[indices[i], :, ..., :]
+
+ # Higher rank indices
+ output[i, ..., j, :, ..., :] = params[indices[i, ..., j], :, ..., :]
+```
+END
+}
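The indexing rule above is NumPy's fancy indexing; a small sketch with illustrative shapes:

```python
import numpy as np

params = np.arange(12.0).reshape(4, 3)   # stand-in for the variable's value
indices = np.array([[0, 2],
                    [1, 3]])
output = params[indices]                 # shape: indices.shape + params.shape[1:]
assert output.shape == (2, 2, 3)
assert (output[0, 1] == params[2]).all()
```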
diff --git a/tensorflow/core/api_def/base_api/api_def_ResourceScatterAdd.pbtxt b/tensorflow/core/api_def/base_api/api_def_ResourceScatterAdd.pbtxt
new file mode 100644
index 0000000000..9e0de08267
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ResourceScatterAdd.pbtxt
@@ -0,0 +1,43 @@
+op {
+ graph_op_name: "ResourceScatterAdd"
+ in_arg {
+ name: "resource"
+ description: <<END
+Should be from a `Variable` node.
+END
+ }
+ in_arg {
+ name: "indices"
+ description: <<END
+A tensor of indices into the first dimension of `ref`.
+END
+ }
+ in_arg {
+ name: "updates"
+ description: <<END
+A tensor of updated values to add to `ref`.
+END
+ }
+ summary: "Adds sparse updates to the variable referenced by `resource`."
+ description: <<END
+This operation computes
+
+ # Scalar indices
+ ref[indices, ...] += updates[...]
+
+ # Vector indices (for each i)
+ ref[indices[i], ...] += updates[i, ...]
+
+ # High rank indices (for each i, ..., j)
+ ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]
+
+Duplicate entries are handled correctly: if multiple `indices` reference
+the same location, their contributions add.
+
+Requires `updates.shape = indices.shape + ref.shape[1:]`.
+
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
+</div>
+END
+}
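The duplicate-index semantics above match NumPy's unbuffered `np.add.at`; a small sketch with illustrative shapes:

```python
import numpy as np

ref = np.zeros((4, 2))
indices = np.array([1, 3, 1])    # index 1 appears twice: contributions add
updates = np.ones((3, 2))        # updates.shape == indices.shape + ref.shape[1:]
np.add.at(ref, indices, updates)
assert (ref[1] == 2.0).all() and (ref[3] == 1.0).all() and (ref[0] == 0.0).all()
```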
diff --git a/tensorflow/core/api_def/base_api/api_def_ResourceScatterUpdate.pbtxt b/tensorflow/core/api_def/base_api/api_def_ResourceScatterUpdate.pbtxt
new file mode 100644
index 0000000000..947535c6c8
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ResourceScatterUpdate.pbtxt
@@ -0,0 +1,34 @@
+op {
+ graph_op_name: "ResourceScatterUpdate"
+ in_arg {
+ name: "resource"
+ description: <<END
+Should be from a `Variable` node.
+END
+ }
+ in_arg {
+ name: "indices"
+ description: <<END
+A tensor of indices into the first dimension of `ref`.
+END
+ }
+ in_arg {
+ name: "updates"
+ description: <<END
+A tensor of updated values to store in `ref`.
+END
+ }
+ summary: "Assigns sparse updates to the variable referenced by `resource`."
+ description: <<END
+This operation computes
+
+ # Scalar indices
+ ref[indices, ...] = updates[...]
+
+ # Vector indices (for each i)
+ ref[indices[i], ...] = updates[i, ...]
+
+ # High rank indices (for each i, ..., j)
+ ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ResourceSparseApplyAdadelta.pbtxt b/tensorflow/core/api_def/base_api/api_def_ResourceSparseApplyAdadelta.pbtxt
new file mode 100644
index 0000000000..1bea6d614c
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ResourceSparseApplyAdadelta.pbtxt
@@ -0,0 +1,53 @@
+op {
+ graph_op_name: "ResourceSparseApplyAdadelta"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "accum"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "accum_update"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "lr"
+ description: <<END
+Learning rate. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "rho"
+ description: <<END
+Decay factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "epsilon"
+ description: <<END
+Constant factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+The gradient.
+END
+ }
+ in_arg {
+ name: "indices"
+ description: <<END
+A vector of indices into the first dimension of var and accum.
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If True, updating of the var and accum tensors will be protected by
+a lock; otherwise the behavior is undefined, but may exhibit less contention.
+END
+ }
+ summary: "var: Should be from a Variable()."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ResourceSparseApplyAdagrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_ResourceSparseApplyAdagrad.pbtxt
new file mode 100644
index 0000000000..f646394760
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ResourceSparseApplyAdagrad.pbtxt
@@ -0,0 +1,47 @@
+op {
+ graph_op_name: "ResourceSparseApplyAdagrad"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "accum"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "lr"
+ description: <<END
+Learning rate. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+The gradient.
+END
+ }
+ in_arg {
+ name: "indices"
+ description: <<END
+A vector of indices into the first dimension of var and accum.
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If `True`, updating of the var and accum tensors will be protected
+by a lock; otherwise the behavior is undefined, but may exhibit less
+contention.
+END
+ }
+ summary: "Update relevant entries in \'*var\' and \'*accum\' according to the adagrad scheme."
+ description: <<END
+That is, for rows for which we have grad, we update var and accum as follows:
+accum += grad * grad
+var -= lr * grad * (1 / sqrt(accum))
+END
+}
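A minimal NumPy sketch of the sparse Adagrad step above; the explicit loop makes the row-wise semantics visible (processing duplicated indices sequentially is an assumption of this sketch):

```python
import numpy as np

def sparse_adagrad_step(var, accum, grad, indices, lr=0.01):
    # Only the rows named in `indices` are touched.
    for g, i in zip(grad, indices):
        accum[i] += np.square(g)
        var[i] -= lr * g / np.sqrt(accum[i])
    return var, accum
```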
diff --git a/tensorflow/core/api_def/base_api/api_def_ResourceSparseApplyAdagradDA.pbtxt b/tensorflow/core/api_def/base_api/api_def_ResourceSparseApplyAdagradDA.pbtxt
new file mode 100644
index 0000000000..96833d8f09
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ResourceSparseApplyAdagradDA.pbtxt
@@ -0,0 +1,65 @@
+op {
+ graph_op_name: "ResourceSparseApplyAdagradDA"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "gradient_accumulator"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "gradient_squared_accumulator"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+The gradient.
+END
+ }
+ in_arg {
+ name: "indices"
+ description: <<END
+A vector of indices into the first dimension of var and accum.
+END
+ }
+ in_arg {
+ name: "lr"
+ description: <<END
+Learning rate. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "l1"
+ description: <<END
+L1 regularization. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "l2"
+ description: <<END
+L2 regularization. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "global_step"
+ description: <<END
+Training step number. Must be a scalar.
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If True, updating of the var and accum tensors will be protected by
+a lock; otherwise the behavior is undefined, but may exhibit less contention.
+END
+ }
+ summary: "Update entries in \'*var\' and \'*accum\' according to the proximal adagrad scheme."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ResourceSparseApplyCenteredRMSProp.pbtxt b/tensorflow/core/api_def/base_api/api_def_ResourceSparseApplyCenteredRMSProp.pbtxt
new file mode 100644
index 0000000000..433d040fe7
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ResourceSparseApplyCenteredRMSProp.pbtxt
@@ -0,0 +1,84 @@
+op {
+ graph_op_name: "ResourceSparseApplyCenteredRMSProp"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "mg"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "ms"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "mom"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "lr"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "rho"
+ description: <<END
+Decay rate. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "epsilon"
+ description: <<END
+Ridge term. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+The gradient.
+END
+ }
+ in_arg {
+ name: "indices"
+ description: <<END
+A vector of indices into the first dimension of var, ms and mom.
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If `True`, updating of the var, mg, ms, and mom tensors is
+protected by a lock; otherwise the behavior is undefined, but may exhibit less
+contention.
+END
+ }
+ summary: "Update \'*var\' according to the centered RMSProp algorithm."
+ description: <<END
+The centered RMSProp algorithm uses an estimate of the centered second moment
+(i.e., the variance) for normalization, as opposed to regular RMSProp, which
+uses the (uncentered) second moment. This often helps with training, but is
+slightly more expensive in terms of computation and memory.
+
+Note that in dense implementation of this algorithm, mg, ms, and mom will
+update even if the grad is zero, but in this sparse implementation, mg, ms,
+and mom will not update in iterations during which the grad is zero.
+
+mean_square = decay * mean_square + (1-decay) * gradient ** 2
+mean_grad = decay * mean_grad + (1-decay) * gradient
+Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
+
+ms <- rho * ms_{t-1} + (1-rho) * grad * grad
+mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
+var <- var - mom
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ResourceSparseApplyFtrl.pbtxt b/tensorflow/core/api_def/base_api/api_def_ResourceSparseApplyFtrl.pbtxt
new file mode 100644
index 0000000000..f75272a63b
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ResourceSparseApplyFtrl.pbtxt
@@ -0,0 +1,74 @@
+op {
+ graph_op_name: "ResourceSparseApplyFtrl"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "accum"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "linear"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+The gradient.
+END
+ }
+ in_arg {
+ name: "indices"
+ description: <<END
+A vector of indices into the first dimension of var and accum.
+END
+ }
+ in_arg {
+ name: "lr"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "l1"
+ description: <<END
+L1 regularization. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "l2"
+ description: <<END
+L2 regularization. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "lr_power"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If `True`, updating of the var and accum tensors will be protected
+by a lock; otherwise the behavior is undefined, but may exhibit less
+contention.
+END
+ }
+ summary: "Update relevant entries in \'*var\' according to the Ftrl-proximal scheme."
+ description: <<END
+That is, for rows for which we have grad, we update var, accum and linear as follows:
+accum_new = accum + grad * grad
+linear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
+quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
+var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
+accum = accum_new
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ResourceSparseApplyFtrlV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_ResourceSparseApplyFtrlV2.pbtxt
new file mode 100644
index 0000000000..45ea013ce8
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ResourceSparseApplyFtrlV2.pbtxt
@@ -0,0 +1,76 @@
+op {
+ graph_op_name: "ResourceSparseApplyFtrlV2"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "accum"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "linear"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+The gradient.
+END
+ }
+ in_arg {
+ name: "indices"
+ description: <<END
+A vector of indices into the first dimension of var and accum.
+END
+ }
+ in_arg {
+ name: "lr"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "l1"
+ description: <<END
+L1 regularization. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "l2"
+ description: <<END
+L2 shrinkage regularization. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "lr_power"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If `True`, updating of the var and accum tensors will be protected
+by a lock; otherwise the behavior is undefined, but may exhibit less
+contention.
+END
+ }
+ summary: "Update relevant entries in \'*var\' according to the Ftrl-proximal scheme."
+ description: <<END
+That is, for rows for which we have grad, we update var, accum and linear as follows:
+grad_with_shrinkage = grad + 2 * l2_shrinkage * var
+accum_new = accum + grad_with_shrinkage * grad_with_shrinkage
+linear += grad_with_shrinkage -
+ (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
+quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
+var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
+accum = accum_new
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ResourceSparseApplyMomentum.pbtxt b/tensorflow/core/api_def/base_api/api_def_ResourceSparseApplyMomentum.pbtxt
new file mode 100644
index 0000000000..671465377a
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ResourceSparseApplyMomentum.pbtxt
@@ -0,0 +1,64 @@
+op {
+ graph_op_name: "ResourceSparseApplyMomentum"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "accum"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "lr"
+ description: <<END
+Learning rate. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+The gradient.
+END
+ }
+ in_arg {
+ name: "indices"
+ description: <<END
+A vector of indices into the first dimension of var and accum.
+END
+ }
+ in_arg {
+ name: "momentum"
+ description: <<END
+Momentum. Must be a scalar.
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If `True`, updating of the var and accum tensors will be protected
+by a lock; otherwise the behavior is undefined, but may exhibit less
+contention.
+END
+ }
+ attr {
+ name: "use_nesterov"
+ description: <<END
+If `True`, the tensor passed to compute grad will be
+var - lr * momentum * accum, so in the end, the var you get is actually
+var - lr * momentum * accum.
+END
+ }
+ summary: "Update relevant entries in \'*var\' and \'*accum\' according to the momentum scheme."
+ description: <<END
+Set use_nesterov = True if you want to use Nesterov momentum.
+
+That is, for rows for which we have grad, we update var and accum as follows:
+
+accum = accum * momentum + grad
+var -= lr * accum
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ResourceSparseApplyProximalAdagrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_ResourceSparseApplyProximalAdagrad.pbtxt
new file mode 100644
index 0000000000..f3a588adaa
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ResourceSparseApplyProximalAdagrad.pbtxt
@@ -0,0 +1,60 @@
+op {
+ graph_op_name: "ResourceSparseApplyProximalAdagrad"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "accum"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "lr"
+ description: <<END
+Learning rate. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "l1"
+ description: <<END
+L1 regularization. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "l2"
+ description: <<END
+L2 regularization. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+The gradient.
+END
+ }
+ in_arg {
+ name: "indices"
+ description: <<END
+A vector of indices into the first dimension of var and accum.
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If True, updating of the var and accum tensors will be protected by
+a lock; otherwise the behavior is undefined, but may exhibit less contention.
+END
+ }
+ summary: "Sparse update entries in \'*var\' and \'*accum\' according to FOBOS algorithm."
+ description: <<END
+That is, for rows for which we have grad, we update var and accum as follows:
+accum += grad * grad
+prox_v = var - lr * grad * (1 / sqrt(accum))
+var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ResourceSparseApplyProximalGradientDescent.pbtxt b/tensorflow/core/api_def/base_api/api_def_ResourceSparseApplyProximalGradientDescent.pbtxt
new file mode 100644
index 0000000000..4a6333c0b5
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ResourceSparseApplyProximalGradientDescent.pbtxt
@@ -0,0 +1,52 @@
+op {
+ graph_op_name: "ResourceSparseApplyProximalGradientDescent"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "alpha"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "l1"
+ description: <<END
+L1 regularization. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "l2"
+ description: <<END
+L2 regularization. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+The gradient.
+END
+ }
+ in_arg {
+ name: "indices"
+ description: <<END
+A vector of indices into the first dimension of var and accum.
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If True, the subtraction will be protected by a lock;
+otherwise the behavior is undefined, but may exhibit less contention.
+END
+ }
+ summary: "Sparse update \'*var\' as FOBOS algorithm with fixed learning rate."
+ description: <<END
+That is, for rows for which we have grad, we update var as follows:
+prox_v = var - alpha * grad
+var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ResourceSparseApplyRMSProp.pbtxt b/tensorflow/core/api_def/base_api/api_def_ResourceSparseApplyRMSProp.pbtxt
new file mode 100644
index 0000000000..a6310711ea
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ResourceSparseApplyRMSProp.pbtxt
@@ -0,0 +1,72 @@
+op {
+ graph_op_name: "ResourceSparseApplyRMSProp"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "ms"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "mom"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "lr"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "rho"
+ description: <<END
+Decay rate. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "epsilon"
+ description: <<END
+Ridge term. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+The gradient.
+END
+ }
+ in_arg {
+ name: "indices"
+ description: <<END
+A vector of indices into the first dimension of var, ms and mom.
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If `True`, updating of the var, ms, and mom tensors is protected
+by a lock; otherwise the behavior is undefined, but may exhibit less
+contention.
+END
+ }
+ summary: "Update \'*var\' according to the RMSProp algorithm."
+ description: <<END
+Note that in dense implementation of this algorithm, ms and mom will
+update even if the grad is zero, but in this sparse implementation, ms
+and mom will not update in iterations during which the grad is zero.
+
+mean_square = decay * mean_square + (1-decay) * gradient ** 2
+Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
+
+ms <- rho * ms_{t-1} + (1-rho) * grad * grad
+mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
+var <- var - mom
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ResourceStridedSliceAssign.pbtxt b/tensorflow/core/api_def/base_api/api_def_ResourceStridedSliceAssign.pbtxt
new file mode 100644
index 0000000000..ec8acbb5bf
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ResourceStridedSliceAssign.pbtxt
@@ -0,0 +1,12 @@
+op {
+ graph_op_name: "ResourceStridedSliceAssign"
+ summary: "Assign `value` to the sliced l-value reference of `ref`."
+ description: <<END
+The values of `value` are assigned to the positions in the variable
+`ref` that are selected by the slice parameters. The slice parameters
+`begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`.
+
+NOTE: this op currently does not support broadcasting, so `value`'s
+shape must be exactly the shape produced by the slice of `ref`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Restore.pbtxt b/tensorflow/core/api_def/base_api/api_def_Restore.pbtxt
new file mode 100644
index 0000000000..816b79cf53
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Restore.pbtxt
@@ -0,0 +1,55 @@
+op {
+ graph_op_name: "Restore"
+ in_arg {
+ name: "file_pattern"
+ description: <<END
+Must have a single element. The pattern of the files from
+which we read the tensor.
+END
+ }
+ in_arg {
+ name: "tensor_name"
+ description: <<END
+Must have a single element. The name of the tensor to be
+restored.
+END
+ }
+ out_arg {
+ name: "tensor"
+ description: <<END
+The restored tensor.
+END
+ }
+ attr {
+ name: "dt"
+ description: <<END
+The type of the tensor to be restored.
+END
+ }
+ attr {
+ name: "preferred_shard"
+ description: <<END
+Index of file to open first if multiple files match
+`file_pattern`.
+END
+ }
+ summary: "Restores a tensor from checkpoint files."
+ description: <<END
+Reads a tensor stored in one or several files. If there are several files (for
+instance because a tensor was saved as slices), `file_pattern` may contain
+wildcard symbols (`*` and `?`) in the filename portion only, not in the
+directory portion.
+
+If a `file_pattern` matches several files, `preferred_shard` can be used to hint
+in which file the requested tensor is likely to be found. This op will first
+open the file at index `preferred_shard` in the list of matching files and try
+to restore tensors from that file. Only if some tensors or tensor slices are
+not found in that first file does the Op open all the files. Setting
+`preferred_shard` to match the value passed as the `shard` input
+of a matching `Save` Op may speed up Restore. This attribute only affects
+performance, not correctness. The default value -1 means files are processed in
+order.
+
+See also `RestoreSlice`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_RestoreSlice.pbtxt b/tensorflow/core/api_def/base_api/api_def_RestoreSlice.pbtxt
new file mode 100644
index 0000000000..e57b1ea42d
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_RestoreSlice.pbtxt
@@ -0,0 +1,52 @@
+op {
+ graph_op_name: "RestoreSlice"
+ in_arg {
+ name: "file_pattern"
+ description: <<END
+Must have a single element. The pattern of the files from
+which we read the tensor.
+END
+ }
+ in_arg {
+ name: "tensor_name"
+ description: <<END
+Must have a single element. The name of the tensor to be
+restored.
+END
+ }
+ in_arg {
+ name: "shape_and_slice"
+ description: <<END
+Scalar. The shapes and slice specifications to use when
+restoring a tensor.
+END
+ }
+ out_arg {
+ name: "tensor"
+ description: <<END
+The restored tensor.
+END
+ }
+ attr {
+ name: "dt"
+ description: <<END
+The type of the tensor to be restored.
+END
+ }
+ attr {
+ name: "preferred_shard"
+ description: <<END
+Index of file to open first if multiple files match
+`file_pattern`. See the documentation for `Restore`.
+END
+ }
+ summary: "Restores a tensor from checkpoint files."
+ description: <<END
+This is like `Restore` except that the restored tensor can be listed as filling
+only a slice of a larger tensor. `shape_and_slice` specifies the shape of the
+larger tensor and the slice that the restored tensor covers.
+
+The `shape_and_slice` input has the same format as the
+elements of the `shapes_and_slices` input of the `SaveSlices` op.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_RestoreV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_RestoreV2.pbtxt
new file mode 100644
index 0000000000..5a64ef36d0
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_RestoreV2.pbtxt
@@ -0,0 +1,52 @@
+op {
+ graph_op_name: "RestoreV2"
+ in_arg {
+ name: "prefix"
+ description: <<END
+Must have a single element. The prefix of a V2 checkpoint.
+END
+ }
+ in_arg {
+ name: "tensor_names"
+ description: <<END
+shape {N}. The names of the tensors to be restored.
+END
+ }
+ in_arg {
+ name: "shape_and_slices"
+ description: <<END
+shape {N}. The slice specs of the tensors to be restored.
+Empty strings indicate that they are non-partitioned tensors.
+END
+ }
+ out_arg {
+ name: "tensors"
+ description: <<END
+shape {N}. The restored tensors, whose shapes are read from the
+checkpoint directly.
+END
+ }
+ attr {
+ name: "dtypes"
+ description: <<END
+shape {N}. The list of expected dtypes for the tensors. Must match
+those stored in the checkpoint.
+END
+ }
+ summary: "Restores tensors from a V2 checkpoint."
+ description: <<END
+For backward compatibility with the V1 format, this Op currently allows
+restoring from a V1 checkpoint as well:
+ - This Op first attempts to find the V2 index file pointed to by "prefix", and
+ if found, proceeds to read it as a V2 checkpoint;
+ - Otherwise the V1 read path is invoked.
+Relying on this behavior is not recommended, as the ability to fall back to read
+V1 might be deprecated and eventually removed.
+
+By default, restores the named tensors in full. If the caller wishes to restore
+specific slices of stored tensors, "shape_and_slices" should be non-empty
+strings and correspondingly well-formed.
+
+Callers must ensure all the named tensors are indeed stored in the checkpoint.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Reverse.pbtxt b/tensorflow/core/api_def/base_api/api_def_Reverse.pbtxt
new file mode 100644
index 0000000000..83d7ee7798
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Reverse.pbtxt
@@ -0,0 +1,69 @@
+op {
+ graph_op_name: "Reverse"
+ visibility: SKIP
+ in_arg {
+ name: "tensor"
+ description: <<END
+Up to 8-D.
+END
+ }
+ in_arg {
+ name: "dims"
+ description: <<END
+1-D. The dimensions to reverse.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+The same shape as `tensor`.
+END
+ }
+ summary: "Reverses specific dimensions of a tensor."
+ description: <<END
+Given a `tensor`, and a `bool` tensor `dims` representing the dimensions
+of `tensor`, this operation reverses each dimension i of `tensor` where
+`dims[i]` is `True`.
+
+`tensor` can have up to 8 dimensions. The number of dimensions
+of `tensor` must equal the number of elements in `dims`. In other words:
+
+`rank(tensor) = size(dims)`
+
+For example:
+
+```
+# tensor 't' is [[[[ 0, 1, 2, 3],
+# [ 4, 5, 6, 7],
+# [ 8, 9, 10, 11]],
+# [[12, 13, 14, 15],
+# [16, 17, 18, 19],
+# [20, 21, 22, 23]]]]
+# tensor 't' shape is [1, 2, 3, 4]
+
+# 'dims' is [False, False, False, True]
+reverse(t, dims) ==> [[[[ 3, 2, 1, 0],
+ [ 7, 6, 5, 4],
+ [ 11, 10, 9, 8]],
+ [[15, 14, 13, 12],
+ [19, 18, 17, 16],
+ [23, 22, 21, 20]]]]
+
+# 'dims' is [False, True, False, False]
+reverse(t, dims) ==> [[[[12, 13, 14, 15],
+ [16, 17, 18, 19],
+ [20, 21, 22, 23]],
+ [[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11]]]]
+
+# 'dims' is [False, False, True, False]
+reverse(t, dims) ==> [[[[8, 9, 10, 11],
+ [4, 5, 6, 7],
+ [0, 1, 2, 3]],
+ [[20, 21, 22, 23],
+ [16, 17, 18, 19],
+ [12, 13, 14, 15]]]]
+```
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ReverseSequence.pbtxt b/tensorflow/core/api_def/base_api/api_def_ReverseSequence.pbtxt
new file mode 100644
index 0000000000..9ee4ead539
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ReverseSequence.pbtxt
@@ -0,0 +1,91 @@
+op {
+ graph_op_name: "ReverseSequence"
+ in_arg {
+ name: "input"
+ description: <<END
+The input to reverse.
+END
+ }
+ in_arg {
+ name: "seq_lengths"
+ description: <<END
+1-D with length `input.dims(batch_dim)` and
+`max(seq_lengths) <= input.dims(seq_dim)`
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+The partially reversed input. It has the same shape as `input`.
+END
+ }
+ attr {
+ name: "seq_dim"
+ description: <<END
+The dimension which is partially reversed.
+END
+ }
+ attr {
+ name: "batch_dim"
+ description: <<END
+The dimension along which `input` is sliced.
+END
+ }
+ summary: "Reverses variable length slices."
+ description: <<END
+This op first slices `input` along the dimension `batch_dim`, and for each
+slice `i`, reverses the first `seq_lengths[i]` elements along
+the dimension `seq_dim`.
+
+The elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_dim]`,
+and `seq_lengths` must be a vector of length `input.dims[batch_dim]`.
+
+The output slice `i` along dimension `batch_dim` is then given by input
+slice `i`, with the first `seq_lengths[i]` slices along dimension
+`seq_dim` reversed.
+
+For example:
+
+```
+# Given this:
+batch_dim = 0
+seq_dim = 1
+input.dims = (4, 8, ...)
+seq_lengths = [7, 2, 3, 5]
+
+# then slices of input are reversed on seq_dim, but only up to seq_lengths:
+output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...]
+output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...]
+output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...]
+output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...]
+
+# while entries past seq_lens are copied through:
+output[0, 7:, :, ...] = input[0, 7:, :, ...]
+output[1, 2:, :, ...] = input[1, 2:, :, ...]
+output[2, 3:, :, ...] = input[2, 3:, :, ...]
+output[3, 5:, :, ...] = input[3, 5:, :, ...]
+```
+
+In contrast, if:
+
+```
+# Given this:
+batch_dim = 2
+seq_dim = 0
+input.dims = (8, ?, 4, ...)
+seq_lengths = [7, 2, 3, 5]
+
+# then slices of input are reversed on seq_dim, but only up to seq_lengths:
+output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...]
+output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...]
+output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...]
+output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...]
+
+# while entries past seq_lens are copied through:
+output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...]
+output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...]
+output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...]
+output[5:, :, 3, :, ...] = input[5:, :, 3, :, ...]
+```
+END
+}
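A minimal NumPy sketch of the slicing-and-reversing rule above (names are illustrative; rows with fewer than two reversed entries are copied through unchanged):

```python
import numpy as np

def reverse_sequence(x, seq_lengths, seq_dim=1, batch_dim=0):
    x = np.asarray(x)
    out = x.copy()
    for i, n in enumerate(seq_lengths):
        if n < 2:
            continue                           # nothing to reverse
        dst = [slice(None)] * x.ndim
        src = [slice(None)] * x.ndim
        dst[batch_dim] = src[batch_dim] = i
        dst[seq_dim] = slice(0, n)             # first n entries...
        src[seq_dim] = slice(n - 1, None, -1)  # ...come from the reversed prefix
        out[tuple(dst)] = x[tuple(src)]
    return out

x = np.arange(32).reshape(4, 8)
out = reverse_sequence(x, [7, 2, 3, 5])
assert (out[1, :2] == x[1, 1::-1]).all() and (out[1, 2:] == x[1, 2:]).all()
```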
diff --git a/tensorflow/core/api_def/base_api/api_def_ReverseV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_ReverseV2.pbtxt
new file mode 100644
index 0000000000..0c9e4c29be
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ReverseV2.pbtxt
@@ -0,0 +1,74 @@
+op {
+ graph_op_name: "ReverseV2"
+ endpoint {
+ name: "Reverse"
+ }
+ in_arg {
+ name: "tensor"
+ description: <<END
+Up to 8-D.
+END
+ }
+ in_arg {
+ name: "axis"
+ description: <<END
+1-D. The indices of the dimensions to reverse. Must be in the range
+`[-rank(tensor), rank(tensor))`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+The same shape as `tensor`.
+END
+ }
+ summary: "Reverses specific dimensions of a tensor."
+ description: <<END
+NOTE: `tf.reverse` has now changed behavior in preparation for 1.0.
+`tf.reverse_v2` is currently an alias that will be deprecated before TF 1.0.
+
+Given a `tensor` and an `int32` tensor `axis` representing the set of
+dimensions of `tensor` to reverse, this operation reverses each dimension
+`i` for which there exists `j` s.t. `axis[j] == i`.
+
+`tensor` can have up to 8 dimensions. `axis` may specify 0 or more
+dimensions. If an index is specified more than once, an InvalidArgument
+error is raised.
+
+For example:
+
+```
+# tensor 't' is [[[[ 0, 1, 2, 3],
+# [ 4, 5, 6, 7],
+# [ 8, 9, 10, 11]],
+# [[12, 13, 14, 15],
+# [16, 17, 18, 19],
+# [20, 21, 22, 23]]]]
+# tensor 't' shape is [1, 2, 3, 4]
+
+# 'dims' is [3] or 'dims' is [-1]
+reverse(t, dims) ==> [[[[ 3, 2, 1, 0],
+ [ 7, 6, 5, 4],
+ [ 11, 10, 9, 8]],
+ [[15, 14, 13, 12],
+ [19, 18, 17, 16],
+ [23, 22, 21, 20]]]]
+
+# 'dims' is '[1]' (or 'dims' is '[-3]')
+reverse(t, dims) ==> [[[[12, 13, 14, 15],
+ [16, 17, 18, 19],
+ [20, 21, 22, 23]],
+ [[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11]]]]
+
+# 'dims' is '[2]' (or 'dims' is '[-2]')
+reverse(t, dims) ==> [[[[8, 9, 10, 11],
+ [4, 5, 6, 7],
+ [0, 1, 2, 3]],
+ [[20, 21, 22, 23],
+ [16, 17, 18, 19],
+ [12, 13, 14, 15]]]]
+```
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_RightShift.pbtxt b/tensorflow/core/api_def/base_api/api_def_RightShift.pbtxt
new file mode 100644
index 0000000000..a7c56a00f1
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_RightShift.pbtxt
@@ -0,0 +1,11 @@
+op {
+ graph_op_name: "RightShift"
+ summary: "Elementwise computes the bitwise right-shift of `x` and `y`."
+ description: <<END
+Performs a logical shift for unsigned integer types, and an arithmetic shift
+for signed integer types.
+
+If `y` is negative, or greater than or equal to the width of `x` in bits,
+the result is implementation defined.
+END
+}
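The signed/unsigned distinction above can be seen with NumPy's shift operator (dtypes are illustrative):

```python
import numpy as np

signed = np.array([-8, 8], dtype=np.int8)
unsigned = np.array([240, 8], dtype=np.uint8)
print(signed >> 2)    # arithmetic shift, sign bit propagates: [-2  2]
print(unsigned >> 2)  # logical shift: [60  2]
```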
diff --git a/tensorflow/core/api_def/base_api/api_def_Rint.pbtxt b/tensorflow/core/api_def/base_api/api_def_Rint.pbtxt
new file mode 100644
index 0000000000..73699c9b6c
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Rint.pbtxt
@@ -0,0 +1,15 @@
+op {
+ graph_op_name: "Rint"
+ summary: "Returns element-wise integer closest to x."
+ description: <<END
+If the result is midway between two representable values,
+the even representable value is chosen.
+For example:
+
+```
+rint(-1.5) ==> -2.0
+rint(0.5000001) ==> 1.0
+rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.]
+```
+END
+}
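The same round-half-to-even behavior is visible in NumPy's `np.rint`:

```python
import numpy as np

print(np.rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]))
# [-2. -2. -0.  0.  2.  2.  2.]  -- ties go to the even neighbor
```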
diff --git a/tensorflow/core/api_def/base_api/api_def_Round.pbtxt b/tensorflow/core/api_def/base_api/api_def_Round.pbtxt
new file mode 100644
index 0000000000..2a7105eae7
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Round.pbtxt
@@ -0,0 +1,8 @@
+op {
+ graph_op_name: "Round"
+ summary: "Rounds the values of a tensor to the nearest integer, element-wise."
+ description: <<END
+Rounds half to even. Also known as banker's rounding. If you want to round
+according to the current system rounding mode use std::rint.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Rsqrt.pbtxt b/tensorflow/core/api_def/base_api/api_def_Rsqrt.pbtxt
new file mode 100644
index 0000000000..a7f768c505
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Rsqrt.pbtxt
@@ -0,0 +1,7 @@
+op {
+ graph_op_name: "Rsqrt"
+ summary: "Computes reciprocal of square root of x element-wise."
+ description: <<END
+I.e., \\(y = 1 / \sqrt{x}\\).
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_RsqrtGrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_RsqrtGrad.pbtxt
new file mode 100644
index 0000000000..501936c5c8
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_RsqrtGrad.pbtxt
@@ -0,0 +1,9 @@
+op {
+ graph_op_name: "RsqrtGrad"
+ visibility: HIDDEN
+ summary: "Computes the gradient for the rsqrt of `x` wrt its input."
+ description: <<END
+Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and `dy`
+is the corresponding input gradient.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_S.pbtxt b/tensorflow/core/api_def/base_api/api_def_S.pbtxt
deleted file mode 100644
index 9c53f9ac62..0000000000
--- a/tensorflow/core/api_def/base_api/api_def_S.pbtxt
+++ /dev/null
@@ -1,2678 +0,0 @@
-op {
- graph_op_name: "SampleDistortedBoundingBox"
- endpoint {
- name: "SampleDistortedBoundingBox"
- }
- summary: "Generate a single randomly distorted bounding box for an image."
- description: <<END
-Bounding box annotations are often supplied in addition to ground-truth labels
-in image recognition or object localization tasks. A common technique for
-training such a system is to randomly distort an image while preserving
-its content, i.e. *data augmentation*. This Op outputs a randomly distorted
-localization of an object, i.e. bounding box, given an `image_size`,
-`bounding_boxes` and a series of constraints.
-
-The output of this Op is a single bounding box that may be used to crop the
-original image. The output is returned as 3 tensors: `begin`, `size` and
-`bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the
-image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize
-what the bounding box looks like.
-
-Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The
-bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
-height of the underlying image.
-
-For example,
-
-```python
- # Generate a single distorted bounding box.
- begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(
- tf.shape(image),
- bounding_boxes=bounding_boxes)
-
- # Draw the bounding box in an image summary.
- image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
- bbox_for_draw)
- tf.image_summary('images_with_box', image_with_box)
-
- # Employ the bounding box to distort the image.
- distorted_image = tf.slice(image, begin, size)
-```
-
-Note that if no bounding box information is available, setting
-`use_image_if_no_bounding_boxes = true` will assume there is a single implicit
-bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is
-false and no bounding boxes are supplied, an error is raised.
-END
-}
-op {
- graph_op_name: "SampleDistortedBoundingBoxV2"
- endpoint {
- name: "SampleDistortedBoundingBoxV2"
- }
- summary: "Generate a single randomly distorted bounding box for an image."
- description: <<END
-Bounding box annotations are often supplied in addition to ground-truth labels
-in image recognition or object localization tasks. A common technique for
-training such a system is to randomly distort an image while preserving
-its content, i.e. *data augmentation*. This Op outputs a randomly distorted
-localization of an object, i.e. bounding box, given an `image_size`,
-`bounding_boxes` and a series of constraints.
-
-The output of this Op is a single bounding box that may be used to crop the
-original image. The output is returned as 3 tensors: `begin`, `size` and
-`bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the
-image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize
-what the bounding box looks like.
-
-Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The
-bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
-height of the underlying image.
-
-For example,
-
-```python
- # Generate a single distorted bounding box.
- begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(
- tf.shape(image),
- bounding_boxes=bounding_boxes)
-
- # Draw the bounding box in an image summary.
- image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
- bbox_for_draw)
- tf.image_summary('images_with_box', image_with_box)
-
- # Employ the bounding box to distort the image.
- distorted_image = tf.slice(image, begin, size)
-```
-
-Note that if no bounding box information is available, setting
-`use_image_if_no_bounding_boxes = true` will assume there is a single implicit
-bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is
-false and no bounding boxes are supplied, an error is raised.
-END
-}
-op {
- graph_op_name: "Save"
- endpoint {
- name: "Save"
- }
- summary: "Saves the input tensors to disk."
- description: <<END
-The size of `tensor_names` must match the number of tensors in `data`. `data[i]`
-is written to `filename` with name `tensor_names[i]`.
-
-See also `SaveSlices`.
-END
-}
-op {
- graph_op_name: "SaveIterator"
- endpoint {
- name: "SaveIterator"
- }
- summary: "Saves the state of the `iterator` at `path`."
- description: <<END
-This state can be restored using "RestoreIterator".
-END
-}
-op {
- graph_op_name: "SaveSlices"
- endpoint {
- name: "SaveSlices"
- }
- summary: "Saves input tensors slices to disk."
- description: <<END
-This is like `Save` except that tensors can be listed in the saved file as being
-a slice of a larger tensor. `shapes_and_slices` specifies the shape of the
-larger tensor and the slice that this tensor covers. `shapes_and_slices` must
-have as many elements as `tensor_names`.
-
-Elements of the `shapes_and_slices` input must either be:
-
-* The empty string, in which case the corresponding tensor is
- saved normally.
-* A string of the form `dim0 dim1 ... dimN-1 slice-spec` where the
- `dimI` are the dimensions of the larger tensor and `slice-spec`
- specifies what part is covered by the tensor to save.
-
-`slice-spec` itself is a `:`-separated list: `slice0:slice1:...:sliceN-1`
-where each `sliceI` is either:
-
-* The string `-` meaning that the slice covers all indices of this dimension
-* `start,length` where `start` and `length` are integers. In that
- case the slice covers `length` indices starting at `start`.
-
-See also `Save`.
-END
-}
-op {
- graph_op_name: "SaveV2"
- endpoint {
- name: "SaveV2"
- }
- summary: "Saves tensors in V2 checkpoint format."
- description: <<END
-By default, saves the named tensors in full. If the caller wishes to save
-specific slices of full tensors, "shape_and_slices" should be non-empty strings
-and correspondingly well-formed.
-END
-}
-op {
- graph_op_name: "ScalarSummary"
- endpoint {
- name: "ScalarSummary"
- }
- summary: "Outputs a `Summary` protocol buffer with scalar values."
- description: <<END
-The input `tags` and `values` must have the same shape. The generated summary
-has a summary value for each tag-value pair in `tags` and `values`.
-END
-}
-op {
- graph_op_name: "ScatterAdd"
- endpoint {
- name: "ScatterAdd"
- }
- summary: "Adds sparse updates to a variable reference."
- description: <<END
-This operation computes
-
-```python
-    # Scalar indices
-    ref[indices, ...] += updates[...]
-
-    # Vector indices (for each i)
-    ref[indices[i], ...] += updates[i, ...]
-
-    # High rank indices (for each i, ..., j)
-    ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]
-```
-
-This operation outputs `ref` after the update is done.
-This makes it easier to chain operations that need to use the updated value.
-
-Duplicate entries are handled correctly: if multiple `indices` reference
-the same location, their contributions add.
-
-Requires `updates.shape = indices.shape + ref.shape[1:]`.
-
-<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
-<img style="width:100%" src="https://www.tensorflow.org/images/ScatterAdd.png" alt>
-</div>
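-
-For example, a minimal TF 1.x sketch (illustrative values; the variable must be
-initialized before the update runs):
-
-```python
-ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
-indices = tf.constant([4, 3, 1, 7])
-updates = tf.constant([9, 10, 11, 12])
-add = tf.scatter_add(ref, indices, updates)
-with tf.Session() as sess:
-  sess.run(tf.global_variables_initializer())
-  print(sess.run(add))  # => [1, 13, 3, 14, 14, 6, 7, 20]
-```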
-END
-}
-op {
- graph_op_name: "ScatterDiv"
- endpoint {
- name: "ScatterDiv"
- }
- summary: "Divides a variable reference by sparse updates."
- description: <<END
-This operation computes
-
-```python
- # Scalar indices
- ref[indices, ...] /= updates[...]
-
- # Vector indices (for each i)
- ref[indices[i], ...] /= updates[i, ...]
-
- # High rank indices (for each i, ..., j)
- ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]
-```
-
-This operation outputs `ref` after the update is done.
-This makes it easier to chain operations that need to use the updated value.
-
-Duplicate entries are handled correctly: if multiple `indices` reference
-the same location, their contributions divide.
-
-Requires `updates.shape = indices.shape + ref.shape[1:]`.
-END
-}
-op {
- graph_op_name: "ScatterMul"
- endpoint {
- name: "ScatterMul"
- }
- summary: "Multiplies sparse updates into a variable reference."
- description: <<END
-This operation computes
-
-```python
- # Scalar indices
- ref[indices, ...] *= updates[...]
-
- # Vector indices (for each i)
- ref[indices[i], ...] *= updates[i, ...]
-
- # High rank indices (for each i, ..., j)
- ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]
-```
-
-This operation outputs `ref` after the update is done.
-This makes it easier to chain operations that need to use the updated value.
-
-Duplicate entries are handled correctly: if multiple `indices` reference
-the same location, their contributions multiply.
-
-Requires `updates.shape = indices.shape + ref.shape[1:]`.
-END
-}
-op {
- graph_op_name: "ScatterNd"
- endpoint {
- name: "ScatterNd"
- }
- summary: "Scatter `updates` into a new (initially zero) tensor according to `indices`."
- description: <<END
-Creates a new tensor by applying sparse `updates` to individual
-values or slices within a zero tensor of the given `shape` according to
-indices. This operator is the inverse of the @{tf.gather_nd} operator which
-extracts values or slices from a given tensor.
-
-**WARNING**: The order in which updates are applied is nondeterministic, so the
-output will be nondeterministic if `indices` contains duplicates.
-
-`indices` is an integer tensor containing indices into a new tensor of shape
-`shape`. The last dimension of `indices` can be at most the rank of `shape`:
-
- indices.shape[-1] <= shape.rank
-
-The last dimension of `indices` corresponds to indices into elements
-(if `indices.shape[-1] = shape.rank`) or slices
-(if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of
-`shape`. `updates` is a tensor with shape
-
- indices.shape[:-1] + shape[indices.shape[-1]:]
-
-The simplest form of scatter is to insert individual elements in a tensor by
-index. For example, say we want to insert 4 scattered elements in a rank-1
-tensor with 8 elements.
-
-<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
-<img style="width:100%" src="https://www.tensorflow.org/images/ScatterNd1.png" alt>
-</div>
-
-In Python, this scatter operation would look like this:
-
-```python
- indices = tf.constant([[4], [3], [1], [7]])
- updates = tf.constant([9, 10, 11, 12])
- shape = tf.constant([8])
- scatter = tf.scatter_nd(indices, updates, shape)
- with tf.Session() as sess:
- print(sess.run(scatter))
-```
-
-The resulting tensor would look like this:
-
- [0, 11, 0, 10, 9, 0, 0, 12]
-
-We can also insert entire slices of a higher rank tensor all at once. For
-example, we can insert two slices in the first dimension of a rank-3 tensor
-with two matrices of new values.
-
-<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
-<img style="width:100%" src="https://www.tensorflow.org/images/ScatterNd2.png" alt>
-</div>
-
-In Python, this scatter operation would look like this:
-
-```python
- indices = tf.constant([[0], [2]])
- updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
- [7, 7, 7, 7], [8, 8, 8, 8]],
- [[5, 5, 5, 5], [6, 6, 6, 6],
- [7, 7, 7, 7], [8, 8, 8, 8]]])
- shape = tf.constant([4, 4, 4])
- scatter = tf.scatter_nd(indices, updates, shape)
- with tf.Session() as sess:
- print(sess.run(scatter))
-```
-
-The resulting tensor would look like this:
-
- [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
- [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
- [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
- [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]
-END
-}
-op {
- graph_op_name: "ScatterNdAdd"
- endpoint {
- name: "ScatterNdAdd"
- }
- summary: "Applies sparse addition between `updates` and individual values or slices"
- description: <<END
-within a given variable according to `indices`.
-
-`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
-
-`indices` must be an integer tensor containing indices into `ref`.
-It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
-
-The innermost dimension of `indices` (with length `K`) corresponds to
-indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
-dimension of `ref`.
-
-`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
-
-```
-[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
-```
-
-For example, say we want to add 4 scattered elements to a rank-1 tensor
-with 8 elements. In Python, that addition would look like this:
-
-```python
-    ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
-    indices = tf.constant([[4], [3], [1], [7]])
-    updates = tf.constant([9, 10, 11, 12])
-    add = tf.scatter_nd_add(ref, indices, updates)
-    with tf.Session() as sess:
-      print(sess.run(add))
-```
-
-The resulting update to `ref` would look like this:
-
- [1, 13, 3, 14, 14, 6, 7, 20]
-
-See @{tf.scatter_nd} for more details about how to make updates to
-slices.
-END
-}
-op {
- graph_op_name: "ScatterNdNonAliasingAdd"
- endpoint {
- name: "ScatterNdNonAliasingAdd"
- }
- summary: "Applies sparse addition to `input` using individual values or slices"
- description: <<END
-from `updates` according to indices `indices`. The updates are non-aliasing:
-`input` is only modified in-place if no other operations will use it.
-Otherwise, a copy of `input` is made. This operation has a gradient with
-respect to both `input` and `updates`.
-
-`input` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
-
-`indices` must be an integer tensor containing indices into `input`.
-It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
-
-The innermost dimension of `indices` (with length `K`) corresponds to
-indices into elements (if `K = P`) or `(P-K)`-dimensional slices
-(if `K < P`) along the `K`th dimension of `input`.
-
-`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
-
-```
-[d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].
-```
-
-For example, say we want to add 4 scattered elements to a rank-1 tensor
-with 8 elements. In Python, that addition would look like this:
-
-```python
-    input = tf.constant([1, 2, 3, 4, 5, 6, 7, 8])
-    indices = tf.constant([[4], [3], [1], [7]])
-    updates = tf.constant([9, 10, 11, 12])
-    output = tf.scatter_nd_non_aliasing_add(input, indices, updates)
-    with tf.Session() as sess:
-      print(sess.run(output))
-```
-
-The resulting value `output` would look like this:
-
- [1, 13, 3, 14, 14, 6, 7, 20]
-
-See @{tf.scatter_nd} for more details about how to make updates to slices.
-END
-}
-op {
- graph_op_name: "ScatterNdSub"
- endpoint {
- name: "ScatterNdSub"
- }
- summary: "Applies sparse subtraction between `updates` and individual values or slices"
- description: <<END
-within a given variable according to `indices`.
-
-`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
-
-`indices` must be an integer tensor containing indices into `ref`.
-It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
-
-The innermost dimension of `indices` (with length `K`) corresponds to
-indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
-dimension of `ref`.
-
-`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
-
-```
-[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
-```
-
-For example, say we want to subtract 4 scattered elements from a rank-1 tensor
-with 8 elements. In Python, that subtraction would look like this:
-
-```python
-    ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
-    indices = tf.constant([[4], [3], [1], [7]])
-    updates = tf.constant([9, 10, 11, 12])
-    sub = tf.scatter_nd_sub(ref, indices, updates)
-    with tf.Session() as sess:
-      print(sess.run(sub))
-```
-
-The resulting update to `ref` would look like this:
-
- [1, -9, 3, -6, -4, 6, 7, -4]
-
-See @{tf.scatter_nd} for more details about how to make updates to
-slices.
-END
-}
-op {
- graph_op_name: "ScatterNdUpdate"
- endpoint {
- name: "ScatterNdUpdate"
- }
- summary: "Applies sparse `updates` to individual values or slices within a given"
- description: <<END
-variable according to `indices`.
-
-`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
-
-`indices` must be an integer tensor containing indices into `ref`.
-It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
-
-The innermost dimension of `indices` (with length `K`) corresponds to
-indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
-dimension of `ref`.
-
-`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
-
-```
-[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
-```
-
-For example, say we want to update 4 scattered elements in a rank-1 tensor
-with 8 elements. In Python, that update would look like this:
-
-```python
-    ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
-    indices = tf.constant([[4], [3], [1], [7]])
-    updates = tf.constant([9, 10, 11, 12])
-    update = tf.scatter_nd_update(ref, indices, updates)
-    with tf.Session() as sess:
-      print(sess.run(update))
-```
-
-The resulting update to `ref` would look like this:
-
- [1, 11, 3, 10, 9, 6, 7, 12]
-
-See @{tf.scatter_nd} for more details about how to make updates to
-slices.
-END
-}
-op {
- graph_op_name: "ScatterSub"
- endpoint {
- name: "ScatterSub"
- }
-  summary: "Subtracts sparse updates from a variable reference."
- description: <<END
-This operation computes
-
-```python
- # Scalar indices
- ref[indices, ...] -= updates[...]
-
- # Vector indices (for each i)
- ref[indices[i], ...] -= updates[i, ...]
-
- # High rank indices (for each i, ..., j)
- ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]
-```
-
-This operation outputs `ref` after the update is done.
-This makes it easier to chain operations that need to use the updated value.
-
-Duplicate entries are handled correctly: if multiple `indices` reference
-the same location, their (negated) contributions add.
-
-Requires `updates.shape = indices.shape + ref.shape[1:]`.
-
-<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
-<img style="width:100%" src="https://www.tensorflow.org/images/ScatterSub.png" alt>
-</div>
-END
-}
-op {
- graph_op_name: "ScatterUpdate"
- endpoint {
- name: "ScatterUpdate"
- }
- summary: "Applies sparse updates to a variable reference."
- description: <<END
-This operation computes
-
-```python
- # Scalar indices
- ref[indices, ...] = updates[...]
-
- # Vector indices (for each i)
- ref[indices[i], ...] = updates[i, ...]
-
- # High rank indices (for each i, ..., j)
- ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
-```
-
-This operation outputs `ref` after the update is done.
-This makes it easier to chain operations that need to use the updated value.
-
-If values in `ref` are to be updated more than once, because there are
-duplicate entries in `indices`, the order in which the updates happen
-for each value is undefined.
-
-Requires `updates.shape = indices.shape + ref.shape[1:]`.
-
-<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
-<img style="width:100%" src="https://www.tensorflow.org/images/ScatterUpdate.png" alt>
-</div>
-END
-}
-op {
- graph_op_name: "SdcaFprint"
- endpoint {
- name: "SdcaFprint"
- }
- summary: "Computes fingerprints of the input strings."
-}
-op {
- graph_op_name: "SdcaOptimizer"
- endpoint {
- name: "SdcaOptimizer"
- }
- summary: "Distributed version of Stochastic Dual Coordinate Ascent (SDCA) optimizer for"
- description: <<END
-linear models with L1 + L2 regularization. As the global optimization objective
-is strongly convex, the optimizer optimizes the dual objective at each step. The
-optimizer applies each update one example at a time. Examples are sampled
-uniformly, and the optimizer is learning-rate free and enjoys a linear
-convergence rate.
-
-[Proximal Stochastic Dual Coordinate Ascent](http://arxiv.org/pdf/1211.2717v1.pdf).<br>
-Shai Shalev-Shwartz, Tong Zhang. 2012
-
-$$\text{Loss Objective} = \sum_{i} f_{i}(wx_{i}) + \frac{l2}{2} \|w\|^2 + l1 \|w\|_1$$
-
-[Adding vs. Averaging in Distributed Primal-Dual Optimization](http://arxiv.org/abs/1502.03508).<br>
-Chenxin Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan,
-Peter Richtarik, Martin Takac. 2015
-
-[Stochastic Dual Coordinate Ascent with Adaptive Probabilities](https://arxiv.org/abs/1502.08053).<br>
-Dominik Csiba, Zheng Qu, Peter Richtarik. 2015
-END
-}
-op {
- graph_op_name: "SdcaShrinkL1"
- endpoint {
- name: "SdcaShrinkL1"
- }
- summary: "Applies L1 regularization shrink step on the parameters."
-}
-op {
- graph_op_name: "SegmentMax"
- endpoint {
- name: "SegmentMax"
- }
- summary: "Computes the maximum along segments of a tensor."
- description: <<END
-Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
-segments.
-
-Computes a tensor such that
-\\(output_i = \max_j(data_j)\\) where `max` is over `j` such
-that `segment_ids[j] == i`.
-
-If the max is empty for a given segment ID `i`, `output[i] = 0`.
-
-<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
-<img style="width:100%" src="https://www.tensorflow.org/images/SegmentMax.png" alt>
-</div>
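-
-For example, a minimal sketch (illustrative values; output shown as comments):
-
-```python
-c = tf.constant([[1, 2, 3, 4], [4, 3, 2, 1], [5, 6, 7, 8]])
-tf.segment_max(c, tf.constant([0, 0, 1]))
-# => [[4, 3, 3, 4],
-#     [5, 6, 7, 8]]
-```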
-END
-}
-op {
- graph_op_name: "SegmentMean"
- endpoint {
- name: "SegmentMean"
- }
- summary: "Computes the mean along segments of a tensor."
- description: <<END
-Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
-segments.
-
-Computes a tensor such that
-\\(output_i = \frac{\sum_j data_j}{N}\\) where the sum is over `j` such
-that `segment_ids[j] == i` and `N` is the total number of
-values summed.
-
-If the mean is empty for a given segment ID `i`, `output[i] = 0`.
-
-<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
-<img style="width:100%" src="https://www.tensorflow.org/images/SegmentMean.png" alt>
-</div>
-END
-}
-op {
- graph_op_name: "SegmentMin"
- endpoint {
- name: "SegmentMin"
- }
- summary: "Computes the minimum along segments of a tensor."
- description: <<END
-Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
-segments.
-
-Computes a tensor such that
-\\(output_i = \min_j(data_j)\\) where `min` is over `j` such
-that `segment_ids[j] == i`.
-
-If the min is empty for a given segment ID `i`, `output[i] = 0`.
-
-<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
-<img style="width:100%" src="https://www.tensorflow.org/images/SegmentMin.png" alt>
-</div>
-END
-}
-op {
- graph_op_name: "SegmentProd"
- endpoint {
- name: "SegmentProd"
- }
- summary: "Computes the product along segments of a tensor."
- description: <<END
-Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
-segments.
-
-Computes a tensor such that
-\\(output_i = \prod_j data_j\\) where the product is over `j` such
-that `segment_ids[j] == i`.
-
-If the product is empty for a given segment ID `i`, `output[i] = 1`.
-
-<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
-<img style="width:100%" src="https://www.tensorflow.org/images/SegmentProd.png" alt>
-</div>
-END
-}
-op {
- graph_op_name: "SegmentSum"
- endpoint {
- name: "SegmentSum"
- }
- summary: "Computes the sum along segments of a tensor."
- description: <<END
-Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
-segments.
-
-Computes a tensor such that
-\\(output_i = \sum_j data_j\\) where the sum is over `j` such
-that `segment_ids[j] == i`.
-
-If the sum is empty for a given segment ID `i`, `output[i] = 0`.
-
-<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
-<img style="width:100%" src="https://www.tensorflow.org/images/SegmentSum.png" alt>
-</div>
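-
-For example, a minimal sketch (illustrative values; output shown as comments):
-
-```python
-c = tf.constant([[1, 2, 3, 4], [4, 3, 2, 1], [5, 6, 7, 8]])
-tf.segment_sum(c, tf.constant([0, 0, 1]))
-# => [[5, 5, 5, 5],
-#     [5, 6, 7, 8]]
-```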
-END
-}
-op {
- graph_op_name: "Select"
- endpoint {
- name: "Select"
- }
- summary: "Selects elements from `t` or `e`, depending on `condition`."
- description: <<END
-The `t` and `e` tensors must have the same shape, and the
-output will also have that shape.
-
-The `condition` tensor must be a scalar if `t` and `e` are scalars.
-If `t` and `e` are vectors or higher rank, then `condition` must be either a
-scalar, a vector with size matching the first dimension of `t`, or must have
-the same shape as `t`.
-
-The `condition` tensor acts as a mask that chooses, based on the value at each
-element, whether the corresponding element / row in the output should be
-taken from `t` (if true) or `e` (if false).
-
-If `condition` is a vector and `t` and `e` are higher rank matrices, then
-it chooses which row (outer dimension) to copy from `t` and `e`.
-If `condition` has the same shape as `t` and `e`, then it chooses which
-element to copy from `t` and `e`.
-
-For example:
-
-```python
-# 'condition' tensor is [[True, False]
-# [False, True]]
-# 't' is [[1, 2],
-# [3, 4]]
-# 'e' is [[5, 6],
-# [7, 8]]
-select(condition, t, e) # => [[1, 6], [7, 4]]
-
-
-# 'condition' tensor is [True, False]
-# 't' is [[1, 2],
-# [3, 4]]
-# 'e' is [[5, 6],
-# [7, 8]]
-select(condition, t, e) ==> [[1, 2],
- [7, 8]]
-
-```
-END
-}
-op {
- graph_op_name: "SelfAdjointEig"
- endpoint {
- name: "SelfAdjointEig"
- }
- summary: "Computes the Eigen Decomposition of a batch of square self-adjoint matrices."
- description: <<END
-The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
-form square matrices, with the same constraints as the single matrix
-SelfAdjointEig.
-
-The result is a `[..., M+1, M]` tensor with `[..., 0, :]` containing the
-eigenvalues, and subsequent `[..., 1:, :]` containing the eigenvectors.
-END
-}
-op {
- graph_op_name: "SelfAdjointEigV2"
- endpoint {
- name: "SelfAdjointEigV2"
- }
- summary: "Computes the eigen decomposition of one or more square self-adjoint matrices."
- description: <<END
-Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in
-`input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`.
-
-```python
-# a is a tensor.
-# e is a tensor of eigenvalues.
-# v is a tensor of eigenvectors.
-e, v = self_adjoint_eig(a)
-e = self_adjoint_eig(a, compute_v=False)
-```
-END
-}
-op {
- graph_op_name: "Selu"
- endpoint {
- name: "Selu"
- }
- summary: "Computes scaled exponential linear: `scale * alpha * (exp(features) - 1)`"
- description: <<END
-if `features < 0`, `scale * features` otherwise.
-
-See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
-END
-}
-op {
- graph_op_name: "SeluGrad"
- endpoint {
- name: "SeluGrad"
- }
- summary: "Computes gradients for the scaled exponential linear (Selu) operation."
-}
-op {
- graph_op_name: "SerializeManySparse"
- endpoint {
- name: "SerializeManySparse"
- }
- summary: "Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` string `Tensor`."
- description: <<END
-The `SparseTensor` must have rank `R` greater than 1, and the first dimension
-is treated as the minibatch dimension. Elements of the `SparseTensor`
-must be sorted in increasing order of this first dimension. The serialized
-`SparseTensor` objects going into each row of `serialized_sparse` will have
-rank `R-1`.
-
-The minibatch size `N` is extracted from `sparse_shape[0]`.
-END
-}
-op {
- graph_op_name: "SerializeSparse"
- endpoint {
- name: "SerializeSparse"
- }
- summary: "Serialize a `SparseTensor` into a string 3-vector (1-D `Tensor`) object."
-}
-op {
- graph_op_name: "SerializeTensor"
- endpoint {
- name: "SerializeTensor"
- }
- summary: "Transforms a Tensor into a serialized TensorProto proto."
-}
-op {
- graph_op_name: "SetSize"
- endpoint {
- name: "SetSize"
- }
- summary: "Number of unique elements along last dimension of input `set`."
- description: <<END
-Input `set` is a `SparseTensor` represented by `set_indices`, `set_values`,
-and `set_shape`. The last dimension contains values in a set; duplicates are
-allowed but ignored.
-
-If `validate_indices` is `True`, this op validates the order and range of `set`
-indices.
-END
-}
-op {
- graph_op_name: "Shape"
- endpoint {
- name: "Shape"
- }
- summary: "Returns the shape of a tensor."
- description: <<END
-This operation returns a 1-D integer tensor representing the shape of `input`.
-
-For example:
-
-```
-# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
-shape(t) ==> [2, 2, 3]
-```
-END
-}
-op {
- graph_op_name: "ShapeN"
- endpoint {
- name: "ShapeN"
- }
- summary: "Returns shape of tensors."
- description: <<END
-This operation returns `N` 1-D integer tensors representing the shape of
-each `input[i]`.
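-
-For example (an illustrative sketch; `t` stands for the tensor from the
-`Shape` example above, of shape `[2, 2, 3]`):
-
-```
-shape_n([t, t]) ==> [[2, 2, 3], [2, 2, 3]]
-```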
-END
-}
-op {
- graph_op_name: "ShardedFilename"
- endpoint {
- name: "ShardedFilename"
- }
- summary: "Generate a sharded filename. The filename is printf formatted as"
- description: <<END
-  `%s-%05d-of-%05d`, basename, shard, num_shards.
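-
-For example (an illustrative sketch with placeholder values):
-
-```
-basename = "output", shard = 2, num_shards = 10  ==>  "output-00002-of-00010"
-```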
-END
-}
-op {
- graph_op_name: "ShardedFilespec"
- endpoint {
- name: "ShardedFilespec"
- }
- summary: "Generate a glob pattern matching all sharded file names."
-}
-op {
- graph_op_name: "ShuffleDataset"
- endpoint {
- name: "ShuffleDataset"
- }
- summary: "Creates a dataset that shuffles elements from `input_dataset` pseudorandomly."
-}
-op {
- graph_op_name: "Sigmoid"
- endpoint {
- name: "Sigmoid"
- }
- summary: "Computes sigmoid of `x` element-wise."
- description: <<END
-Specifically, `y = 1 / (1 + exp(-x))`.
-END
-}
-op {
- graph_op_name: "SigmoidGrad"
- endpoint {
- name: "SigmoidGrad"
- }
- summary: "Computes the gradient of the sigmoid of `x` wrt its input."
- description: <<END
-Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and
-`dy` is the corresponding input gradient.
-END
-}
-op {
- graph_op_name: "Sign"
- endpoint {
- name: "Sign"
- }
- summary: "Returns an element-wise indication of the sign of a number."
- description: <<END
-`y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`.
-
-For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.
-END
-}
-op {
- graph_op_name: "Sin"
- endpoint {
- name: "Sin"
- }
- summary: "Computes sin of x element-wise."
-}
-op {
- graph_op_name: "Sinh"
- endpoint {
- name: "Sinh"
- }
- summary: "Computes hyperbolic sine of x element-wise."
-}
-op {
- graph_op_name: "Size"
- endpoint {
- name: "Size"
- }
- summary: "Returns the size of a tensor."
- description: <<END
-This operation returns an integer representing the number of elements in
-`input`.
-
-For example:
-
-```
-# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
-size(t) ==> 12
-```
-END
-}
-op {
- graph_op_name: "SkipDataset"
- endpoint {
- name: "SkipDataset"
- }
- summary: "Creates a dataset that skips `count` elements from the `input_dataset`."
-}
-op {
- graph_op_name: "Skipgram"
- endpoint {
- name: "Skipgram"
- }
- summary: "Parses a text file and creates a batch of examples."
-}
-op {
- graph_op_name: "Slice"
- endpoint {
- name: "Slice"
- }
- summary: "Return a slice from \'input\'."
- description: <<END
-The output tensor is a tensor with dimensions described by 'size'
-whose values are extracted from 'input' starting at the offsets in
-'begin'.
-
-*Requirements*:
- 0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n)
-END
-}
-op {
- graph_op_name: "SloppyInterleaveDataset"
- endpoint {
- name: "SloppyInterleaveDataset"
- }
- summary: "Creates a dataset that applies `f` to the outputs of `input_dataset`."
- description: <<END
-The resulting dataset is similar to the `InterleaveDataset`, with the exception
-that if retrieving the next value from a dataset would cause the requester to
-block, it will skip that input dataset. This dataset is especially useful
-when loading data from variable-latency datastores (e.g. HDFS, GCS), as it
-allows the training step to proceed so long as some data is available.
-
-!! WARNING !! This dataset is not deterministic!
-END
-}
-op {
- graph_op_name: "Softmax"
- endpoint {
- name: "Softmax"
- }
- summary: "Computes softmax activations."
- description: <<END
-For each batch `i` and class `j` we have
-
-    softmax[i, j] = exp(logits[i, j]) / sum_k(exp(logits[i, k]))
-END
-}
-op {
- graph_op_name: "SoftmaxCrossEntropyWithLogits"
- endpoint {
- name: "SoftmaxCrossEntropyWithLogits"
- }
- summary: "Computes softmax cross entropy cost and gradients to backpropagate."
- description: <<END
-Inputs are the logits, not probabilities.
-END
-}
-op {
- graph_op_name: "Softplus"
- endpoint {
- name: "Softplus"
- }
- summary: "Computes softplus: `log(exp(features) + 1)`."
-}
-op {
- graph_op_name: "SoftplusGrad"
- endpoint {
- name: "SoftplusGrad"
- }
- summary: "Computes softplus gradients for a softplus operation."
-}
-op {
- graph_op_name: "Softsign"
- endpoint {
- name: "Softsign"
- }
- summary: "Computes softsign: `features / (abs(features) + 1)`."
-}
-op {
- graph_op_name: "SoftsignGrad"
- endpoint {
- name: "SoftsignGrad"
- }
- summary: "Computes softsign gradients for a softsign operation."
-}
-op {
- graph_op_name: "SpaceToBatch"
- endpoint {
- name: "SpaceToBatch"
- }
- summary: "SpaceToBatch for 4-D tensors of type T."
- description: <<END
-This is a legacy version of the more general SpaceToBatchND.
-
-Zero-pads and then rearranges (permutes) blocks of spatial data into batch.
-More specifically, this op outputs a copy of the input tensor where values from
-the `height` and `width` dimensions are moved to the `batch` dimension. After
-the zero-padding, both `height` and `width` of the input must be divisible by the
-block size.
-END
-}
-op {
- graph_op_name: "SpaceToBatchND"
- endpoint {
- name: "SpaceToBatchND"
- }
- summary: "SpaceToBatch for N-D tensors of type T."
- description: <<END
-This operation divides "spatial" dimensions `[1, ..., M]` of the input into a
-grid of blocks of shape `block_shape`, and interleaves these blocks with the
-"batch" dimension (0) such that in the output, the spatial dimensions
-`[1, ..., M]` correspond to the position within the grid, and the batch
-dimension combines both the position within a spatial block and the original
-batch position. Prior to division into blocks, the spatial dimensions of the
-input are optionally zero padded according to `paddings`. See below for a
-precise description.
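-
-For example, a minimal sketch with `block_shape = [2, 2]` and no padding
-(illustrative values):
-
-```python
-x = tf.constant([[[[1], [2]], [[3], [4]]]])  # shape [1, 2, 2, 1]
-tf.space_to_batch_nd(x, block_shape=[2, 2], paddings=[[0, 0], [0, 0]])
-# => shape [4, 1, 1, 1]: [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
-```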
-END
-}
-op {
- graph_op_name: "SpaceToDepth"
- endpoint {
- name: "SpaceToDepth"
- }
- summary: "SpaceToDepth for tensors of type T."
- description: <<END
-Rearranges blocks of spatial data into depth. More specifically,
-this op outputs a copy of the input tensor where values from the `height`
-and `width` dimensions are moved to the `depth` dimension.
-The attr `block_size` indicates the input block size.
-
-  * Non-overlapping blocks of size `block_size x block_size` are rearranged
- into depth at each location.
- * The depth of the output tensor is `block_size * block_size * input_depth`.
- * The Y, X coordinates within each block of the input become the high order
- component of the output channel index.
- * The input tensor's height and width must be divisible by block_size.
-
-The `data_format` attr specifies the layout of the input and output tensors
-with the following options:
- "NHWC": `[ batch, height, width, channels ]`
- "NCHW": `[ batch, channels, height, width ]`
- "NCHW_VECT_C":
- `qint8 [ batch, channels / 4, height, width, channels % 4 ]`
-
-It is useful to consider the operation as transforming a 6-D Tensor.
-e.g. for data_format = NHWC,
- Each element in the input tensor can be specified via 6 coordinates,
- ordered by decreasing memory layout significance as:
- n,oY,bY,oX,bX,iC (where n=batch index, oX, oY means X or Y coordinates
- within the output image, bX, bY means coordinates
- within the input block, iC means input channels).
- The output would be a transpose to the following layout:
- n,oY,oX,bY,bX,iC
-
-This operation is useful for resizing the activations between convolutions
-(but keeping all data), e.g. instead of pooling. It is also useful for training
-purely convolutional models.
-
-For example, given an input of shape `[1, 2, 2, 1]`, data_format = "NHWC" and
-block_size = 2:
-
-```
-x = [[[[1], [2]],
- [[3], [4]]]]
-```
-
-This operation will output a tensor of shape `[1, 1, 1, 4]`:
-
-```
-[[[[1, 2, 3, 4]]]]
-```
-
-Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`,
-the corresponding output will have a single element (i.e. width and height are
-both 1) and will have a depth of 4 channels (1 * block_size * block_size).
-The output element shape is `[1, 1, 4]`.
-
-For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g.
-
-```
-x = [[[[1, 2, 3], [4, 5, 6]],
- [[7, 8, 9], [10, 11, 12]]]]
-```
-
-This operation, for block_size of 2, will return the following tensor of shape
-`[1, 1, 1, 12]`
-
-```
-[[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
-```
-
-Similarly, for the following input of shape `[1, 4, 4, 1]`, and a block size of 2:
-
-```
-x = [[[[1], [2], [5], [6]],
- [[3], [4], [7], [8]],
- [[9], [10], [13], [14]],
- [[11], [12], [15], [16]]]]
-```
-
-the operator will return the following tensor of shape `[1, 2, 2, 4]`:
-
-```
-x = [[[[1, 2, 3, 4],
- [5, 6, 7, 8]],
- [[9, 10, 11, 12],
- [13, 14, 15, 16]]]]
-```
-END
-}
-op {
- graph_op_name: "SparseAccumulatorApplyGradient"
- endpoint {
- name: "SparseAccumulatorApplyGradient"
- }
- summary: "Applies a sparse gradient to a given accumulator."
- description: <<END
-Does not add if local_step is smaller than the accumulator's
-global_step.
-END
-}
-op {
- graph_op_name: "SparseAccumulatorTakeGradient"
- endpoint {
- name: "SparseAccumulatorTakeGradient"
- }
- summary: "Extracts the average sparse gradient in a SparseConditionalAccumulator."
- description: <<END
-The op blocks until sufficient (i.e., more than `num_required`)
-gradients have been accumulated. If the accumulator has already
-aggregated more than `num_required` gradients, it returns the
-average of the accumulated gradients. It also automatically increments
-the recorded global_step in the accumulator by 1, and resets the
-aggregate to 0.
-END
-}
-op {
- graph_op_name: "SparseAdd"
- endpoint {
- name: "SparseAdd"
- }
- summary: "Adds two `SparseTensor` objects to produce another `SparseTensor`."
- description: <<END
-The input `SparseTensor` objects' indices are assumed ordered in standard
-lexicographic order. If this is not the case, before this step run
-`SparseReorder` to restore index ordering.
-
-By default, if two values sum to zero at some index, the output `SparseTensor`
-would still include that particular location in its index, storing a zero in the
-corresponding value slot. To override this, callers can specify `thresh`,
-indicating that if the sum has a magnitude strictly smaller than `thresh`, its
-corresponding value and index would then not be included. In particular,
-`thresh == 0` (default) means everything is kept and actual thresholding happens
-only for a positive value.
-
-In the following shapes, `nnz` is the count after taking `thresh` into account.
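-
-For example, a minimal sketch of the `thresh` behavior (illustrative values):
-
-```python
-a = tf.SparseTensor(indices=[[0, 0], [1, 1]], values=[1.0, 2.0],
-                    dense_shape=[2, 2])
-b = tf.SparseTensor(indices=[[0, 0]], values=[-1.0], dense_shape=[2, 2])
-tf.sparse_add(a, b)              # keeps an explicit 0.0 at [0, 0]
-tf.sparse_add(a, b, thresh=0.1)  # prunes [0, 0]; only [1, 1] -> 2.0 remains
-```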
-END
-}
-op {
- graph_op_name: "SparseAddGrad"
- endpoint {
- name: "SparseAddGrad"
- }
- summary: "The gradient operator for the SparseAdd op."
- description: <<END
-The SparseAdd op calculates A + B, where A, B, and the sum are all represented
-as `SparseTensor` objects. This op takes in the upstream gradient w.r.t.
-non-empty values of the sum, and outputs the gradients w.r.t. the non-empty
-values of A and B.
-END
-}
-op {
- graph_op_name: "SparseApplyAdadelta"
- endpoint {
- name: "SparseApplyAdadelta"
- }
-  summary: "Update relevant entries in \'*var\' and \'*accum\' according to the adadelta scheme."
-}
-op {
- graph_op_name: "SparseApplyAdagrad"
- endpoint {
- name: "SparseApplyAdagrad"
- }
- summary: "Update relevant entries in \'*var\' and \'*accum\' according to the adagrad scheme."
- description: <<END
-That is, for rows for which we have `grad`, we update `var` and `accum` as follows:
-accum += grad * grad
-var -= lr * grad * (1 / sqrt(accum))
-END
-}
-op {
- graph_op_name: "SparseApplyAdagradDA"
- endpoint {
- name: "SparseApplyAdagradDA"
- }
- summary: "Update entries in \'*var\' and \'*accum\' according to the proximal adagrad scheme."
-}
-op {
- graph_op_name: "SparseApplyCenteredRMSProp"
- endpoint {
- name: "SparseApplyCenteredRMSProp"
- }
- summary: "Update \'*var\' according to the centered RMSProp algorithm."
- description: <<END
-The centered RMSProp algorithm uses an estimate of the centered second moment
-(i.e., the variance) for normalization, as opposed to regular RMSProp, which
-uses the (uncentered) second moment. This often helps with training, but is
-slightly more expensive in terms of computation and memory.
-
-Note that in the dense implementation of this algorithm, `mg`, `ms`, and `mom`
-will update even if the `grad` is zero, but in this sparse implementation,
-`mg`, `ms`, and `mom` will not update in iterations during which `grad` is zero.
-
-mean_square = decay * mean_square + (1-decay) * gradient ** 2
-mean_grad = decay * mean_grad + (1-decay) * gradient
-Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
-
-ms <- rho * ms_{t-1} + (1-rho) * grad * grad
-mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
-var <- var - mom
-END
-}
-op {
- graph_op_name: "SparseApplyFtrl"
- endpoint {
- name: "SparseApplyFtrl"
- }
- summary: "Update relevant entries in \'*var\' according to the Ftrl-proximal scheme."
- description: <<END
-That is, for rows for which we have `grad`, we update `var`, `accum` and `linear` as follows:
-accum_new = accum + grad * grad
-linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
-quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
-var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
-accum = accum_new
-END
-}
-op {
- graph_op_name: "SparseApplyFtrlV2"
- endpoint {
- name: "SparseApplyFtrlV2"
- }
- summary: "Update relevant entries in \'*var\' according to the Ftrl-proximal scheme."
- description: <<END
-That is, for rows for which we have `grad`, we update `var`, `accum` and `linear` as follows:
-grad_with_shrinkage = grad + 2 * l2_shrinkage * var
-accum_new = accum + grad_with_shrinkage * grad_with_shrinkage
-linear += grad_with_shrinkage +
- (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
-quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
-var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
-accum = accum_new
-END
-}
-op {
- graph_op_name: "SparseApplyMomentum"
- endpoint {
- name: "SparseApplyMomentum"
- }
- summary: "Update relevant entries in \'*var\' and \'*accum\' according to the momentum scheme."
- description: <<END
-Set use_nesterov = True if you want to use Nesterov momentum.
-
-That is, for rows for which we have `grad`, we update `var` and `accum` as follows:
-
-accum = accum * momentum + grad
-var -= lr * accum
-END
-}
-op {
- graph_op_name: "SparseApplyProximalAdagrad"
- endpoint {
- name: "SparseApplyProximalAdagrad"
- }
- summary: "Sparse update entries in \'*var\' and \'*accum\' according to FOBOS algorithm."
- description: <<END
-That is, for rows for which we have `grad`, we update `var` and `accum` as follows:
-accum += grad * grad
-prox_v = var
-prox_v -= lr * grad * (1 / sqrt(accum))
-var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
-END
-}
-op {
- graph_op_name: "SparseApplyProximalGradientDescent"
- endpoint {
- name: "SparseApplyProximalGradientDescent"
- }
- summary: "Sparse update \'*var\' as FOBOS algorithm with fixed learning rate."
- description: <<END
-That is, for rows for which we have `grad`, we update `var` as follows:
-prox_v = var - alpha * grad
-var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
-END
-}
-op {
- graph_op_name: "SparseApplyRMSProp"
- endpoint {
- name: "SparseApplyRMSProp"
- }
- summary: "Update \'*var\' according to the RMSProp algorithm."
- description: <<END
-Note that in the dense implementation of this algorithm, `ms` and `mom` will
-update even if the `grad` is zero, but in this sparse implementation, `ms`
-and `mom` will not update in iterations during which `grad` is zero.
-
-mean_square = decay * mean_square + (1-decay) * gradient ** 2
-Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
-
-ms <- rho * ms_{t-1} + (1-rho) * grad * grad
-mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
-var <- var - mom
-END
-}
-op {
- graph_op_name: "SparseConcat"
- endpoint {
- name: "SparseConcat"
- }
- summary: "Concatenates a list of `SparseTensor` along the specified dimension."
- description: <<END
-Concatenation is with respect to the dense versions of these sparse tensors.
-It is assumed that each input is a `SparseTensor` whose elements are ordered
-along increasing dimension number.
-
-All inputs' shapes must match, except for the concat dimension. The
-`indices`, `values`, and `shapes` lists must have the same length.
-
-The output shape is identical to the inputs', except along the concat
-dimension, where it is the sum of the inputs' sizes along that dimension.
-
-The output elements will be resorted to preserve the sort order along
-increasing dimension number.
-
-This op runs in `O(M log M)` time, where `M` is the total number of non-empty
-values across all inputs. This is due to the need for an internal sort in
-order to concatenate efficiently across an arbitrary dimension.
-
-For example, if `concat_dim = 1` and the inputs are
-
- sp_inputs[0]: shape = [2, 3]
- [0, 2]: "a"
- [1, 0]: "b"
- [1, 1]: "c"
-
- sp_inputs[1]: shape = [2, 4]
- [0, 1]: "d"
- [0, 2]: "e"
-
-then the output will be
-
- shape = [2, 7]
- [0, 2]: "a"
- [0, 4]: "d"
- [0, 5]: "e"
- [1, 0]: "b"
- [1, 1]: "c"
-
-Graphically this is equivalent to doing
-
- [ a] concat [ d e ] = [ a d e ]
- [b c ] [ ] [b c ]
-END
-}
-op {
- graph_op_name: "SparseConditionalAccumulator"
- endpoint {
- name: "SparseConditionalAccumulator"
- }
- summary: "A conditional accumulator for aggregating sparse gradients."
- description: <<END
-The accumulator accepts gradients marked with local_step greater or
-equal to the most recent global_step known to the accumulator. The
-average can be extracted from the accumulator, provided sufficient
-gradients have been accumulated. Extracting the average automatically
-resets the aggregate to 0, and increments the global_step recorded by
-the accumulator.
-END
-}
-op {
- graph_op_name: "SparseCross"
- endpoint {
- name: "SparseCross"
- }
- summary: "Generates sparse cross from a list of sparse and dense tensors."
- description: <<END
-The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each
-representing features of one feature column. It outputs a 2D `SparseTensor` with
-the batchwise crosses of these features.
-
-For example, if the inputs are
-
- inputs[0]: SparseTensor with shape = [2, 2]
- [0, 0]: "a"
- [1, 0]: "b"
- [1, 1]: "c"
-
- inputs[1]: SparseTensor with shape = [2, 1]
- [0, 0]: "d"
- [1, 0]: "e"
-
- inputs[2]: Tensor [["f"], ["g"]]
-
-then the output will be
-
- shape = [2, 2]
- [0, 0]: "a_X_d_X_f"
- [1, 0]: "b_X_e_X_g"
- [1, 1]: "c_X_e_X_g"
-
-If `hashed_output=true`, then the output will be
-
- shape = [2, 2]
- [0, 0]: FingerprintCat64(
- Fingerprint64("f"), FingerprintCat64(
- Fingerprint64("d"), Fingerprint64("a")))
- [1, 0]: FingerprintCat64(
- Fingerprint64("g"), FingerprintCat64(
- Fingerprint64("e"), Fingerprint64("b")))
- [1, 1]: FingerprintCat64(
- Fingerprint64("g"), FingerprintCat64(
- Fingerprint64("e"), Fingerprint64("c")))
-END
-}
-op {
- graph_op_name: "SparseDenseCwiseAdd"
- endpoint {
- name: "SparseDenseCwiseAdd"
- }
- summary: "Adds up a SparseTensor and a dense Tensor, using these special rules:"
- description: <<END
-(1) Broadcasts the dense side to have the same shape as the sparse side, if
- eligible;
-(2) Then, only the dense values pointed to by the indices of the SparseTensor
- participate in the cwise addition.
-
-By these rules, the result is a logical SparseTensor with exactly the same
-indices and shape, but possibly with different non-zero values. The output of
-this Op is the resultant non-zero values.
-END
-}
-op {
- graph_op_name: "SparseDenseCwiseDiv"
- endpoint {
- name: "SparseDenseCwiseDiv"
- }
- summary: "Component-wise divides a SparseTensor by a dense Tensor."
- description: <<END
-*Limitation*: this Op only broadcasts the dense side to the sparse side, but not
-the other direction.
-END
-}
-op {
- graph_op_name: "SparseDenseCwiseMul"
- endpoint {
- name: "SparseDenseCwiseMul"
- }
- summary: "Component-wise multiplies a SparseTensor by a dense Tensor."
- description: <<END
-The output locations corresponding to the implicitly zero elements in the sparse
-tensor will be zero (i.e., will not take up storage space), regardless of the
-contents of the dense tensor (even if it is +/-Inf, noting that Inf*0 == NaN).
-
-*Limitation*: this Op only broadcasts the dense side to the sparse side, but not
-the other direction.
-END
-}
-op {
- graph_op_name: "SparseFillEmptyRows"
- endpoint {
- name: "SparseFillEmptyRows"
- }
- summary: "Fills empty rows in the input 2-D `SparseTensor` with a default value."
- description: <<END
-The input `SparseTensor` is represented via the tuple of inputs
-(`indices`, `values`, `dense_shape`). The output `SparseTensor` has the
-same `dense_shape` but with indices `output_indices` and values
-`output_values`.
-
-This op inserts a single entry for every row that doesn't have any values.
-The index is created as `[row, 0, ..., 0]` and the inserted value
-is `default_value`.
-
-For example, suppose `sp_input` has shape `[5, 6]` and non-empty values:
-
- [0, 1]: a
- [0, 3]: b
- [2, 0]: c
- [3, 1]: d
-
-Rows 1 and 4 are empty, so the output will be of shape `[5, 6]` with values:
-
- [0, 1]: a
- [0, 3]: b
- [1, 0]: default_value
- [2, 0]: c
- [3, 1]: d
- [4, 0]: default_value
-
-The output `SparseTensor` will be in row-major order and will have the
-same shape as the input.
-
-This op also returns an indicator vector shaped `[dense_shape[0]]` such that
-
- empty_row_indicator[i] = True iff row i was an empty row.
-
-And a reverse index map vector shaped `[indices.shape[0]]` that is used during
-backpropagation,
-
- reverse_index_map[j] = out_j s.t. indices[j, :] == output_indices[out_j, :]
-END
-}
-op {
- graph_op_name: "SparseFillEmptyRowsGrad"
- endpoint {
- name: "SparseFillEmptyRowsGrad"
- }
- summary: "The gradient of SparseFillEmptyRows."
- description: <<END
-Takes vectors reverse_index_map, shaped `[N]`, and grad_values,
-shaped `[N_full]`, where `N_full >= N` and copies data into either
-`d_values` or `d_default_value`. Here `d_values` is shaped `[N]` and
-`d_default_value` is a scalar.
-
- d_values[j] = grad_values[reverse_index_map[j]]
- d_default_value = sum_{k : 0 .. N_full - 1} (
- grad_values[k] * 1{k not in reverse_index_map})
-END
-}
-op {
- graph_op_name: "SparseMatMul"
- endpoint {
- name: "SparseMatMul"
- }
- summary: "Multiply matrix \"a\" by matrix \"b\"."
- description: <<END
-The inputs must be two-dimensional matrices and the inner dimension of "a" must
-match the outer dimension of "b". This op is optimized for the case where at
-least one of "a" or "b" is sparse. The breakeven for using this versus a dense
-matrix multiply on one platform was 30% zero values in the sparse matrix.
-
-The gradient computation of this operation will only take advantage of sparsity
-in the input gradient when that gradient comes from a Relu.
-END
-}
-op {
- graph_op_name: "SparseReduceMax"
- endpoint {
- name: "SparseReduceMax"
- }
- summary: "Computes the max of elements across dimensions of a SparseTensor."
- description: <<END
-This Op takes a SparseTensor and is the sparse counterpart to
-`tf.reduce_max()`. In particular, this Op also returns a dense `Tensor`
-instead of a sparse one.
-
-Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
-`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
-`reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
-with length 1.
-
-If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
-with a single element is returned. Additionally, the axes can be negative,
-which are interpreted according to the indexing rules in Python.
-END
-}
-op {
- graph_op_name: "SparseReduceMaxSparse"
- endpoint {
- name: "SparseReduceMaxSparse"
- }
- summary: "Computes the max of elements across dimensions of a SparseTensor."
- description: <<END
-This Op takes a SparseTensor and is the sparse counterpart to
-`tf.reduce_max()`. In contrast to SparseReduceMax, this Op returns a
-SparseTensor.
-
-Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
-`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
-`reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
-with length 1.
-
-If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
-with a single element is returned. Additionally, the axes can be negative,
-which are interpreted according to the indexing rules in Python.
-END
-}
-op {
- graph_op_name: "SparseReduceSum"
- endpoint {
- name: "SparseReduceSum"
- }
- summary: "Computes the sum of elements across dimensions of a SparseTensor."
- description: <<END
-This Op takes a SparseTensor and is the sparse counterpart to
-`tf.reduce_sum()`. In particular, this Op also returns a dense `Tensor`
-instead of a sparse one.
-
-Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
-`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
-`reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
-with length 1.
-
-If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
-with a single element is returned. Additionally, the axes can be negative,
-which are interpreted according to the indexing rules in Python.
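-
-For example, a minimal sketch (`?` marks implicitly zero entries):
-
-```python
-# 'x' represents [[1, ?, 1], [?, 1, ?]].
-x = tf.SparseTensor(indices=[[0, 0], [0, 2], [1, 1]],
-                    values=[1, 1, 1], dense_shape=[2, 3])
-tf.sparse_reduce_sum(x)                          # => 3
-tf.sparse_reduce_sum(x, axis=1)                  # => [2, 1]
-tf.sparse_reduce_sum(x, axis=1, keep_dims=True)  # => [[2], [1]]
-```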
-END
-}
-op {
- graph_op_name: "SparseReduceSumSparse"
- endpoint {
- name: "SparseReduceSumSparse"
- }
- summary: "Computes the sum of elements across dimensions of a SparseTensor."
- description: <<END
-This Op takes a SparseTensor and is the sparse counterpart to
-`tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a
-SparseTensor.
-
-Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
-`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
-`reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
-with length 1.
-
-If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
-with a single element is returned. Additionally, the axes can be negative,
-which are interpreted according to the indexing rules in Python.
-END
-}
-op {
- graph_op_name: "SparseReorder"
- endpoint {
- name: "SparseReorder"
- }
- summary: "Reorders a SparseTensor into the canonical, row-major ordering."
- description: <<END
-Note that by convention, all sparse ops preserve the canonical ordering along
-increasing dimension number. The only time ordering can be violated is during
-manual manipulation of the indices and values vectors to add entries.
-
-Reordering does not affect the shape of the SparseTensor.
-
-If the tensor has rank `R` and `N` non-empty values, `input_indices` has
-shape `[N, R]`, `input_values` has length `N`, and `input_shape` has length `R`.
-END
-}
-op {
- graph_op_name: "SparseReshape"
- endpoint {
- name: "SparseReshape"
- }
- summary: "Reshapes a SparseTensor to represent values in a new dense shape."
- description: <<END
-This operation has the same semantics as reshape on the represented dense
-tensor. The `input_indices` are recomputed based on the requested `new_shape`.
-
-If one component of `new_shape` is the special value -1, the size of that
-dimension is computed so that the total dense size remains constant. At
-most one component of `new_shape` can be -1. The number of dense elements
-implied by `new_shape` must be the same as the number of dense elements
-originally implied by `input_shape`.
-
-Reshaping does not affect the order of values in the SparseTensor.
-
-If the input tensor has rank `R_in` and `N` non-empty values, and `new_shape`
-has length `R_out`, then `input_indices` has shape `[N, R_in]`,
-`input_shape` has length `R_in`, `output_indices` has shape `[N, R_out]`, and
-`output_shape` has length `R_out`.
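-
-For example, a minimal sketch (illustrative values; `-1` is inferred as 4
-since the dense size is 12):
-
-```python
-sp = tf.SparseTensor(indices=[[0, 0], [1, 5]], values=[1, 2],
-                     dense_shape=[2, 6])
-tf.sparse_reshape(sp, [3, -1])
-# => indices [[0, 0], [2, 3]], dense_shape [3, 4]
-```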
-END
-}
-op {
- graph_op_name: "SparseSegmentMean"
- endpoint {
- name: "SparseSegmentMean"
- }
- summary: "Computes the mean along sparse segments of a tensor."
- description: <<END
-Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
-segments.
-
-Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first
-dimension, selecting a subset of dimension 0, specified by `indices`.
-END
-}
-op {
- graph_op_name: "SparseSegmentMeanGrad"
- endpoint {
- name: "SparseSegmentMeanGrad"
- }
- summary: "Computes gradients for SparseSegmentMean."
- description: <<END
-Returns tensor `output` with the same shape as `grad`, except for dimension 0,
-whose value is `output_dim0`.
-END
-}
-op {
- graph_op_name: "SparseSegmentSqrtN"
- endpoint {
- name: "SparseSegmentSqrtN"
- }
- summary: "Computes the sum along sparse segments of a tensor divided by the sqrt of N."
- description: <<END
-N is the size of the segment being reduced.
-
-Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
-segments.
-END
-}
-op {
- graph_op_name: "SparseSegmentSqrtNGrad"
- endpoint {
- name: "SparseSegmentSqrtNGrad"
- }
- summary: "Computes gradients for SparseSegmentSqrtN."
- description: <<END
-Returns tensor `output` with the same shape as `grad`, except for dimension 0,
-whose value is `output_dim0`.
-END
-}
-op {
- graph_op_name: "SparseSegmentSum"
- endpoint {
- name: "SparseSegmentSum"
- }
- summary: "Computes the sum along sparse segments of a tensor."
- description: <<END
-Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
-segments.
-
-Like `SegmentSum`, but `segment_ids` can have rank less than `data`'s first
-dimension, selecting a subset of dimension 0, specified by `indices`.
-
-For example:
-
-```python
-c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
-
-# Select two rows, one segment.
-tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
-# => [[0 0 0 0]]
-
-# Select two rows, two segments.
-tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
-# => [[ 1 2 3 4]
-# [-1 -2 -3 -4]]
-
-# Select all rows, two segments.
-tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
-# => [[0 0 0 0]
-# [5 6 7 8]]
-
-# Which is equivalent to:
-tf.segment_sum(c, tf.constant([0, 0, 1]))
-```
-END
-}
-op {
- graph_op_name: "SparseSlice"
- endpoint {
- name: "SparseSlice"
- }
- summary: "Slice a `SparseTensor` based on the `start` and `size`."
- description: <<END
-For example, if the input is
-
- input_tensor = shape = [2, 7]
- [ a d e ]
- [b c ]
-
-Graphically the output tensors are:
-
- sparse_slice([0, 0], [2, 4]) = shape = [2, 4]
- [ a ]
- [b c ]
-
- sparse_slice([0, 4], [2, 3]) = shape = [2, 3]
- [ d e ]
- [ ]
-END
-}
-op {
- graph_op_name: "SparseSoftmax"
- endpoint {
- name: "SparseSoftmax"
- }
- summary: "Applies softmax to a batched N-D `SparseTensor`."
- description: <<END
-The inputs represent an N-D SparseTensor with logical shape `[..., B, C]`
-(where `N >= 2`), and with indices sorted in the canonical lexicographic order.
-
-This op is equivalent to applying the normal `tf.nn.softmax()` to each innermost
-logical submatrix with shape `[B, C]`, but with the catch that *the implicitly
-zero elements do not participate*. Specifically, the algorithm is equivalent
-to the following:
-
- (1) Applies `tf.nn.softmax()` to a densified view of each innermost submatrix
- with shape `[B, C]`, along the size-C dimension;
- (2) Masks out the original implicitly-zero locations;
- (3) Renormalizes the remaining elements.
-
-Hence, the `SparseTensor` result has exactly the same non-zero indices and
-shape.
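-
-For example, a minimal sketch (illustrative values; the single-entry second
-row softmaxes to 1.0 because its implicit zero does not participate):
-
-```python
-sp = tf.SparseTensor(indices=[[0, 0], [0, 1], [1, 0]],
-                     values=[1.0, 1.0, 3.0], dense_shape=[2, 2])
-tf.sparse_softmax(sp)
-# => values [0.5, 0.5, 1.0]; indices and shape are unchanged
-```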
-END
-}
-op {
- graph_op_name: "SparseSoftmaxCrossEntropyWithLogits"
- endpoint {
- name: "SparseSoftmaxCrossEntropyWithLogits"
- }
- summary: "Computes softmax cross entropy cost and gradients to backpropagate."
- description: <<END
-Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept
-a matrix of label probabilities, but rather a single label per row
-of features. This label is considered to have probability 1.0 for the
-given row.
-
-Inputs are the logits, not probabilities.
-END
-}
-op {
- graph_op_name: "SparseSparseMaximum"
- endpoint {
- name: "SparseSparseMaximum"
- }
- summary: "Returns the element-wise max of two SparseTensors."
- description: <<END
-Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
-END
-}
-op {
- graph_op_name: "SparseSparseMinimum"
- endpoint {
- name: "SparseSparseMinimum"
- }
- summary: "Returns the element-wise min of two SparseTensors."
- description: <<END
-Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
-END
-}
-op {
- graph_op_name: "SparseSplit"
- endpoint {
- name: "SparseSplit"
- }
- summary: "Split a `SparseTensor` into `num_split` tensors along one dimension."
- description: <<END
-If `shape[split_dim]` is not an integer multiple of `num_split`, slices
-`[0 : shape[split_dim] % num_split]` get one extra dimension.
-For example, if `split_dim = 1` and `num_split = 2` and the input is
-
- input_tensor = shape = [2, 7]
- [ a d e ]
- [b c ]
-
-Graphically the output tensors are:
-
- output_tensor[0] = shape = [2, 4]
- [ a ]
- [b c ]
-
- output_tensor[1] = shape = [2, 3]
- [ d e ]
- [ ]
-END
-}
-op {
- graph_op_name: "SparseTensorDenseAdd"
- endpoint {
- name: "SparseTensorDenseAdd"
- }
- summary: "Adds up a `SparseTensor` and a dense `Tensor`, producing a dense `Tensor`."
- description: <<END
-This Op does not require `a_indices` be sorted in standard lexicographic order.
-END
-}
-op {
- graph_op_name: "SparseTensorDenseMatMul"
- endpoint {
- name: "SparseTensorDenseMatMul"
- }
- summary: "Multiply SparseTensor (of rank 2) \"A\" by dense matrix \"B\"."
- description: <<END
-No validity checking is performed on the indices of A. However, the following
-input format is recommended for optimal behavior:
-
-if adjoint_a == false:
- A should be sorted in lexicographically increasing order. Use SparseReorder
- if you're not sure.
-if adjoint_a == true:
- A should be sorted in order of increasing dimension 1 (i.e., "column major"
- order instead of "row major" order).
-END
-}
-op {
- graph_op_name: "SparseTensorSliceDataset"
- endpoint {
- name: "SparseTensorSliceDataset"
- }
- summary: "Creates a dataset that splits a SparseTensor into elements row-wise."
-}
-op {
- graph_op_name: "SparseToDense"
- endpoint {
- name: "SparseToDense"
- }
- summary: "Converts a sparse representation into a dense tensor."
- description: <<END
-Builds an array `dense` with shape `output_shape` such that
-
-```
-# If sparse_indices is scalar
-dense[i] = (i == sparse_indices ? sparse_values : default_value)
-
-# If sparse_indices is a vector, then for each i
-dense[sparse_indices[i]] = sparse_values[i]
-
-# If sparse_indices is an n by d matrix, then for each i in [0, n)
-dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
-```
-
-All other values in `dense` are set to `default_value`. If `sparse_values` is a
-scalar, all sparse indices are set to this single value.
-
-Indices should be sorted in lexicographic order, and indices must not
-contain any repeats. If `validate_indices` is true, these properties
-are checked during execution.
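-
-For instance, a small sketch via the Python wrapper (hypothetical values):
-
-```python
-dense = tf.sparse_to_dense(sparse_indices=[[0, 1], [2, 3]],
-                           output_shape=[3, 4],
-                           sparse_values=[5, 6],
-                           default_value=0)
-# => [[0, 5, 0, 0],
-#     [0, 0, 0, 0],
-#     [0, 0, 0, 6]]
-```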
-END
-}
-op {
- graph_op_name: "SparseToSparseSetOperation"
- endpoint {
- name: "SparseToSparseSetOperation"
- }
- summary: "Applies set operation along last dimension of 2 `SparseTensor` inputs."
- description: <<END
-See SetOperationOp::SetOperationFromContext for values of `set_operation`.
-
-If `validate_indices` is `True`, `SparseToSparseSetOperation` validates the
-order and range of `set1` and `set2` indices.
-
-Input `set1` is a `SparseTensor` represented by `set1_indices`, `set1_values`,
-and `set1_shape`. For `set1` ranked `n`, 1st `n-1` dimensions must be the same
-as `set2`. Dimension `n` contains values in a set, duplicates are allowed but
-ignored.
-
-Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`,
-and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same
-as `set1`. Dimension `n` contains values in a set, duplicates are allowed but
-ignored.
-
-If `validate_indices` is `True`, this op validates the order and range of `set1`
-and `set2` indices.
-
-Output `result` is a `SparseTensor` represented by `result_indices`,
-`result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this
-has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`
-dimension contains the result of `set_operation` applied to the corresponding
-`[0...n-1]` dimension of `set`.
-END
-}
-op {
- graph_op_name: "Split"
- endpoint {
- name: "Split"
- }
- summary: "Splits a tensor into `num_split` tensors along one dimension."
-}
-op {
- graph_op_name: "SplitV"
- endpoint {
- name: "SplitV"
- }
- summary: "Splits a tensor into `num_split` tensors along one dimension."
-}
-op {
- graph_op_name: "SqlDataset"
- endpoint {
- name: "SqlDataset"
- }
- summary: "Creates a dataset that executes a SQL query and emits rows of the result set."
-}
-op {
- graph_op_name: "Sqrt"
- endpoint {
- name: "Sqrt"
- }
- summary: "Computes square root of x element-wise."
- description: <<END
-I.e., \\(y = \sqrt{x} = x^{1/2}\\).
-END
-}
-op {
- graph_op_name: "SqrtGrad"
- endpoint {
- name: "SqrtGrad"
- }
- summary: "Computes the gradient for the sqrt of `x` wrt its input."
- description: <<END
-Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and `dy`
-is the corresponding input gradient.
-END
-}
-op {
- graph_op_name: "Square"
- endpoint {
- name: "Square"
- }
- summary: "Computes square of x element-wise."
- description: <<END
-I.e., \\(y = x * x = x^2\\).
-END
-}
-op {
- graph_op_name: "SquaredDifference"
- endpoint {
- name: "SquaredDifference"
- }
- summary: "Returns (x - y)(x - y) element-wise."
- description: <<END
-*NOTE*: `SquaredDifference` supports broadcasting. More about broadcasting
-[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-END
-}
-op {
- graph_op_name: "Squeeze"
- endpoint {
- name: "Squeeze"
- }
- summary: "Removes dimensions of size 1 from the shape of a tensor."
- description: <<END
-Given a tensor `input`, this operation returns a tensor of the same type with
-all dimensions of size 1 removed. If you don't want to remove all size 1
-dimensions, you can remove specific size 1 dimensions by specifying
-`squeeze_dims`.
-
-For example:
-
-```
-# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
-shape(squeeze(t)) ==> [2, 3]
-```
-
-Or, to remove specific size 1 dimensions:
-
-```
-# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
-shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1]
-```
-END
-}
-op {
- graph_op_name: "Stack"
- endpoint {
- name: "Stack"
- }
- summary: "Deprecated, use StackV2."
-}
-op {
- graph_op_name: "StackClose"
- endpoint {
- name: "StackClose"
- }
- summary: "Deprecated, use StackCloseV2."
-}
-op {
- graph_op_name: "StackCloseV2"
- endpoint {
- name: "StackCloseV2"
- }
- summary: "Delete the stack from its resource container."
-}
-op {
- graph_op_name: "StackPop"
- endpoint {
- name: "StackPop"
- }
- summary: "Deprecated, use StackPopV2."
-}
-op {
- graph_op_name: "StackPopV2"
- endpoint {
- name: "StackPopV2"
- }
- summary: "Pop the element at the top of the stack."
-}
-op {
- graph_op_name: "StackPush"
- endpoint {
- name: "StackPush"
- }
- summary: "Deprecated, use StackPushV2."
-}
-op {
- graph_op_name: "StackPushV2"
- endpoint {
- name: "StackPushV2"
- }
- summary: "Push an element onto the stack."
-}
-op {
- graph_op_name: "StackV2"
- endpoint {
- name: "StackV2"
- }
- summary: "A stack that produces elements in first-in last-out order."
-}
-op {
- graph_op_name: "Stage"
- endpoint {
- name: "Stage"
- }
- summary: "Stage values similar to a lightweight Enqueue."
- description: <<END
-The basic functionality of this Op is similar to a queue with many
-fewer capabilities and options. This Op is optimized for performance.
-END
-}
-op {
- graph_op_name: "StageClear"
- endpoint {
- name: "StageClear"
- }
- summary: "Op removes all elements in the underlying container."
-}
-op {
- graph_op_name: "StagePeek"
- endpoint {
- name: "StagePeek"
- }
- summary: "Op peeks at the values at the specified index. If the"
- description: <<END
-underlying container does not contain sufficient elements
-this op will block until it does. This Op is optimized for
-performance.
-END
-}
-op {
- graph_op_name: "StageSize"
- endpoint {
- name: "StageSize"
- }
- summary: "Op returns the number of elements in the underlying container."
-}
-op {
- graph_op_name: "StatelessRandomNormal"
- endpoint {
- name: "StatelessRandomNormal"
- }
- summary: "Outputs deterministic pseudorandom values from a normal distribution."
- description: <<END
-The generated values will have mean 0 and standard deviation 1.
-
-The outputs are a deterministic function of `shape` and `seed`.
-END
-}
-op {
- graph_op_name: "StatelessRandomUniform"
- endpoint {
- name: "StatelessRandomUniform"
- }
- summary: "Outputs deterministic pseudorandom random values from a uniform distribution."
- description: <<END
-The generated values follow a uniform distribution in the range `[0, 1)`. The
-lower bound 0 is included in the range, while the upper bound 1 is excluded.
-
-The outputs are a deterministic function of `shape` and `seed`.
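-
-A minimal sketch, assuming the op is exposed as
-`tf.contrib.stateless.stateless_random_uniform` and that `seed` is a
-shape-`[2]` integer tensor:
-
-```python
-seed = tf.constant([7, 42], dtype=tf.int64)
-a = tf.contrib.stateless.stateless_random_uniform([2, 3], seed=seed)
-b = tf.contrib.stateless.stateless_random_uniform([2, 3], seed=seed)
-# a and b evaluate to identical values on every run.
-```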
-END
-}
-op {
- graph_op_name: "StatelessTruncatedNormal"
- endpoint {
- name: "StatelessTruncatedNormal"
- }
- summary: "Outputs deterministic pseudorandom values from a truncated normal distribution."
- description: <<END
-The generated values follow a normal distribution with mean 0 and standard
-deviation 1, except that values whose magnitude is more than 2 standard
-deviations from the mean are dropped and re-picked.
-
-The outputs are a deterministic function of `shape` and `seed`.
-END
-}
-op {
- graph_op_name: "StopGradient"
- endpoint {
- name: "StopGradient"
- }
- summary: "Stops gradient computation."
- description: <<END
-When executed in a graph, this op outputs its input tensor as-is.
-
-When building ops to compute gradients, this op prevents the contribution of
-its inputs to be taken into account. Normally, the gradient generator adds ops
-to a graph to compute the derivatives of a specified 'loss' by recursively
-finding out inputs that contributed to its computation. If you insert this op
-in the graph, its inputs are masked from the gradient generator. They are not
-taken into account for computing gradients.
-
-This is useful any time you want to compute a value with TensorFlow but need
-to pretend that the value was a constant; a code sketch follows the list
-below. Some examples include:
-
-* The *EM* algorithm where the *M-step* should not involve backpropagation
- through the output of the *E-step*.
-* Contrastive divergence training of Boltzmann machines where, when
- differentiating the energy function, the training must not backpropagate
- through the graph that generated the samples from the model.
-* Adversarial training, where no backprop should happen through the adversarial
- example generation process.
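-
-A minimal sketch of the common pattern (hypothetical values):
-
-```python
-t = tf.constant([1.0, 2.0])
-frozen = tf.stop_gradient(t * 2.0)  # treated as a constant by autodiff
-loss = tf.reduce_sum(frozen * t)
-grads = tf.gradients(loss, [t])     # only the second factor contributes
-```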
-END
-}
-op {
- graph_op_name: "StridedSlice"
- endpoint {
- name: "StridedSlice"
- }
- summary: "Return a strided slice from `input`."
- description: <<END
-Note, most python users will want to use the Python `Tensor.__getitem__`
-or `Variable.__getitem__` rather than this op directly.
-
-The goal of this op is to produce a new tensor with a subset of
-the elements from the `n` dimensional `input` tensor. The subset is chosen using
-a sequence of `m` sparse range specifications encoded into the arguments
-of this function. Note, in some cases
-`m` could be equal to `n`, but this need not be the case. Each
-range specification entry can be one of the following:
-
-- An ellipsis (...). Ellipses are used to imply zero or more
- dimensions of full-dimension selection and are produced using
- `ellipsis_mask`. For example, `foo[...]` is the identity slice.
-
-- A new axis. This is used to insert a new shape=1 dimension and is
-  produced using `new_axis_mask`. For example, `foo[tf.newaxis, :, :]` where
-  `foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor.
-
-
-- A range `begin:end:stride`. This is used to specify how much to choose from
- a given dimension. `stride` can be any integer but 0. `begin` is an integer
- which represents the index of the first value to select while `end` represents
- the index of the last value to select. The number of values selected in each
- dimension is `end - begin` if `stride > 0` and `begin - end` if `stride < 0`.
- `begin` and `end` can be negative where `-1` is the last element, `-2` is
- the second to last. `begin_mask` controls whether to replace the explicitly
- given `begin` with an implicit effective value of `0` if `stride > 0` and
- `-1` if `stride < 0`. `end_mask` is analogous but produces the number
- required to create the largest open interval. For example, given a shape
- `(3,)` tensor `foo[:]`, the effective `begin` and `end` are `0` and `3`. Do
- not assume this is equivalent to `foo[0:-1]` which has an effective `begin`
- and `end` of `0` and `2`. Another example is `foo[-2::-1]` which reverses the
- first dimension of a tensor while dropping the last two (in the original
- order elements). For example `foo = [1,2,3,4]; foo[-2::-1]` is `[4,3]`.
-
-- A single index. This is used to keep only elements that have a given
-  index. For example, `foo[2, :]` on a shape `(5, 6)` tensor produces a
- shape `(6,)` tensor. This is encoded in `begin` and `end` and
- `shrink_axis_mask`.
-
-Each conceptual range specification is encoded in the op's arguments. This
-encoding is best understood by considering a non-trivial example. In
-particular,
-`foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as
-
-```
-begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0)
-end = [2, 4, x, x, -3, x]
-strides = [1, 1, x, x, -1, 1]
-begin_mask = 1<<4 | 1 << 5 = 48
-end_mask = 1<<5 = 32
-ellipsis_mask = 1<<3 = 8
-new_axis_mask = 1<<2 4
-shrink_axis_mask = 1<<0
-```
-
-In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of
-the slice becomes (2, 1, 5, 5, 2, 5).
-Let us walk step by step through each argument specification.
-
-1. The first argument in the example slice is turned into `begin = 1` and
-`end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we
-also set the appropriate bit in `shrink_axis_mask`.
-
-2. `2:4` contributes 2, 4, 1 to begin, end, and stride. All masks have
-zero bits contributed.
-
-3. None is a synonym for `tf.newaxis`. This means insert a size-1 dimension
-in the final shape. Dummy values are contributed to begin,
-end and stride, while the new_axis_mask bit is set.
-
-4. `...` grabs the full ranges from as many dimensions as needed to
-fully specify a slice for every dimension of the input shape.
-
-5. `:-3:-1` shows the use of negative indices. A negative index `i` associated
-with a dimension that has shape `s` is converted to a positive index
-`s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion
-is done internally so begin, end and strides receive x, -3, and -1.
-The appropriate begin_mask bit is set to indicate the start range is the
-full range (ignoring the x).
-
-6. `:` indicates that the entire contents of the corresponding dimension
-is selected. This is equivalent to `::` or `0::1`. begin, end, and strides
-receive 0, 0, and 1, respectively. The appropriate bits in `begin_mask` and
-`end_mask` are also set.
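-
-As a minimal sketch of calling the op directly (rather than through
-`__getitem__`):
-
-```python
-t = tf.constant([[[1, 1, 1], [2, 2, 2]],
-                 [[3, 3, 3], [4, 4, 4]]])
-tf.strided_slice(t, [1, 0, 0], [2, 1, 3], [1, 1, 1])  # => [[[3, 3, 3]]]
-```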
-
-*Requirements*:
- `0 != strides[i] for i in [0, m)`
- `ellipsis_mask must be a power of two (only one ellipsis)`
-END
-}
-op {
- graph_op_name: "StridedSliceAssign"
- endpoint {
- name: "StridedSliceAssign"
- }
- summary: "Assign `value` to the sliced l-value reference of `ref`."
- description: <<END
-The values of `value` are assigned to the positions in the variable
-`ref` that are selected by the slice parameters. The slice parameters
-`begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`.
-
-NOTE this op currently does not support broadcasting and so `value`'s
-shape must be exactly the shape produced by the slice of `ref`.
-END
-}
-op {
- graph_op_name: "StridedSliceGrad"
- endpoint {
- name: "StridedSliceGrad"
- }
- summary: "Returns the gradient of `StridedSlice`."
- description: <<END
-Since `StridedSlice` cuts out pieces of its `input` which is size
-`shape`, its gradient will have the same shape (which is passed here
-as `shape`). The gradient will be zero in any element that the slice
-does not select.
-
-Arguments are the same as `StridedSlice` with the exception that
-`dy` is the input gradient to be propagated and `shape` is the
-shape of `StridedSlice`'s `input`.
-END
-}
-op {
- graph_op_name: "StringJoin"
- endpoint {
- name: "StringJoin"
- }
- summary: "Joins the strings in the given list of string tensors into one tensor;"
- description: <<END
-with the given separator (default is an empty separator).
-END
-}
-op {
- graph_op_name: "StringSplit"
- endpoint {
- name: "StringSplit"
- }
- summary: "Split elements of `input` based on `delimiter` into a `SparseTensor`."
- description: <<END
-Let N be the size of the source (typically N will be the batch size). Split each
-element of `input` based on `delimiter` and return a `SparseTensor`
-containing the split tokens. Empty tokens are ignored.
-
-`delimiter` can be empty, or a string of split characters. If `delimiter` is an
- empty string, each element of `input` is split into individual single-byte
- character strings, including splitting of UTF-8 multibyte sequences. Otherwise
- every character of `delimiter` is a potential split point.
-
-For example:
-  If N = 2, input[0] is 'hello world' and input[1] is 'a b c', then the output
- will be
-
- indices = [0, 0;
- 0, 1;
- 1, 0;
- 1, 1;
- 1, 2]
- shape = [2, 3]
- values = ['hello', 'world', 'a', 'b', 'c']
-END
-}
-op {
- graph_op_name: "StringToHashBucket"
- endpoint {
- name: "StringToHashBucket"
- }
- summary: "Converts each string in the input Tensor to its hash mod by a number of buckets."
- description: <<END
-The hash function is deterministic on the content of the string within the
-process.
-
-Note that the hash function may change from time to time.
-This functionality will be deprecated and it's recommended to use
-`tf.string_to_hash_bucket_fast()` or `tf.string_to_hash_bucket_strong()`.
-END
-}
-op {
- graph_op_name: "StringToHashBucketFast"
- endpoint {
- name: "StringToHashBucketFast"
- }
- summary: "Converts each string in the input Tensor to its hash mod by a number of buckets."
- description: <<END
-The hash function is deterministic on the content of the string within the
-process and will never change. However, it is not suitable for cryptography.
-This function may be used when CPU time is scarce and inputs are trusted or
-unimportant. There is a risk of adversaries constructing inputs that all hash
-to the same bucket. To prevent this problem, use a strong hash function with
-`tf.string_to_hash_bucket_strong`.
-END
-}
-op {
- graph_op_name: "StringToHashBucketStrong"
- endpoint {
- name: "StringToHashBucketStrong"
- }
- summary: "Converts each string in the input Tensor to its hash mod by a number of buckets."
- description: <<END
-The hash function is deterministic on the content of the string within the
-process. The hash function is a keyed hash function, where attribute `key`
-defines the key of the hash function. `key` is an array of 2 elements.
-
-A strong hash is important when inputs may be malicious, e.g. URLs with
-additional components. Adversaries could try to make their inputs hash to the
-same bucket for a denial-of-service attack or to skew the results. A strong
-hash prevents this by making it difficult, if not infeasible, to compute inputs
-that hash to the same bucket. This comes at a cost of roughly 4x higher compute
-time than `tf.string_to_hash_bucket_fast`.
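-
-A minimal usage sketch (made-up key values):
-
-```python
-buckets = tf.string_to_hash_bucket_strong(['Hello', 'TensorFlow'],
-                                          num_buckets=10,
-                                          key=[1234, 5678])
-```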
-END
-}
-op {
- graph_op_name: "StringToNumber"
- endpoint {
- name: "StringToNumber"
- }
- summary: "Converts each string in the input Tensor to the specified numeric type."
- description: <<END
-(Note that int32 overflow results in an error while float overflow
-results in a rounded value.)
-END
-}
-op {
- graph_op_name: "Sub"
- endpoint {
- name: "Sub"
- }
- summary: "Returns x - y element-wise."
- description: <<END
-*NOTE*: `Sub` supports broadcasting. More about broadcasting
-[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-END
-}
-op {
- graph_op_name: "Substr"
- endpoint {
- name: "Substr"
- }
- summary: "Return substrings from `Tensor` of strings."
- description: <<END
-For each string in the input `Tensor`, creates a substring starting at index
-`pos` with a total length of `len`.
-
-If `len` defines a substring that would extend beyond the length of the input
-string, then as many characters as possible are used.
-
-If `pos` is negative or specifies a character index larger than any of the input
-strings, then an `InvalidArgumentError` is thrown.
-
-`pos` and `len` must have the same shape, otherwise a `ValueError` is thrown on
-Op creation.
-
-*NOTE*: `Substr` supports broadcasting up to two dimensions. More about
-broadcasting
-[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-
----
-
-Examples
-
-Using scalar `pos` and `len`:
-
-```python
-input = [b'Hello', b'World']
-position = 1
-length = 3
-
-output = [b'ell', b'orl']
-```
-
-Using `pos` and `len` with same shape as `input`:
-
-```python
-input = [[b'ten', b'eleven', b'twelve'],
- [b'thirteen', b'fourteen', b'fifteen'],
- [b'sixteen', b'seventeen', b'eighteen']]
-position = [[1, 2, 3],
- [1, 2, 3],
- [1, 2, 3]]
-length = [[2, 3, 4],
- [4, 3, 2],
- [5, 5, 5]]
-
-output = [[b'en', b'eve', b'lve'],
- [b'hirt', b'urt', b'te'],
- [b'ixtee', b'vente', b'hteen']]
-```
-
-Broadcasting `pos` and `len` onto `input`:
-
-```
-input = [[b'ten', b'eleven', b'twelve'],
- [b'thirteen', b'fourteen', b'fifteen'],
- [b'sixteen', b'seventeen', b'eighteen'],
- [b'nineteen', b'twenty', b'twentyone']]
-position = [1, 2, 3]
-length = [1, 2, 3]
-
-output = [[b'e', b'ev', b'lve'],
- [b'h', b'ur', b'tee'],
- [b'i', b've', b'hte'],
- [b'i', b'en', b'nty']]
-```
-
-Broadcasting `input` onto `pos` and `len`:
-
-```
-input = b'thirteen'
-position = [1, 5, 7]
-length = [3, 2, 1]
-
-output = [b'hir', b'ee', b'n']
-```
-END
-}
-op {
- graph_op_name: "Sum"
- endpoint {
- name: "Sum"
- }
- summary: "Computes the sum of elements across dimensions of a tensor."
- description: <<END
-Reduces `input` along the dimensions given in `reduction_indices`. Unless
-`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
-`reduction_indices`. If `keep_dims` is true, the reduced dimensions are
-retained with length 1.
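-
-For example, via the `tf.reduce_sum` wrapper:
-
-```python
-x = tf.constant([[1, 1, 1], [1, 1, 1]])
-tf.reduce_sum(x)                     # => 6
-tf.reduce_sum(x, 0)                  # => [2, 2, 2]
-tf.reduce_sum(x, 1, keep_dims=True)  # => [[3], [3]]
-```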
-END
-}
-op {
- graph_op_name: "Svd"
- endpoint {
- name: "Svd"
- }
- summary: "Computes the singular value decompositions of one or more matrices."
- description: <<END
-Computes the SVD of each inner matrix in `input` such that
-`input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, :])`
-
-```python
-# a is a tensor containing a batch of matrices.
-# s is a tensor of singular values for each matrix.
-# u is the tensor containing the left singular vectors for each matrix.
-# v is the tensor containing the right singular vectors for each matrix.
-s, u, v = svd(a)
-s, _, _ = svd(a, compute_uv=False)
-```
-END
-}
-op {
- graph_op_name: "Switch"
- endpoint {
- name: "Switch"
- }
- summary: "Forwards `data` to the output port determined by `pred`."
- description: <<END
-If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise,
-the data goes to `output_false`.
-
-See also `RefSwitch` and `Merge`.
-END
-}
-op {
- graph_op_name: "SymbolicGradient"
- endpoint {
- name: "SymbolicGradient"
- }
- summary: "Computes the gradient function for function f via backpropagation."
-}
diff --git a/tensorflow/core/api_def/base_api/api_def_SampleDistortedBoundingBox.pbtxt b/tensorflow/core/api_def/base_api/api_def_SampleDistortedBoundingBox.pbtxt
new file mode 100644
index 0000000000..0716b26114
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SampleDistortedBoundingBox.pbtxt
@@ -0,0 +1,131 @@
+op {
+ graph_op_name: "SampleDistortedBoundingBox"
+ in_arg {
+ name: "image_size"
+ description: <<END
+1-D, containing `[height, width, channels]`.
+END
+ }
+ in_arg {
+ name: "bounding_boxes"
+ description: <<END
+3-D with shape `[batch, N, 4]` describing the N bounding boxes
+associated with the image.
+END
+ }
+ out_arg {
+ name: "begin"
+ description: <<END
+1-D, containing `[offset_height, offset_width, 0]`. Provide as input to
+`tf.slice`.
+END
+ }
+ out_arg {
+ name: "size"
+ description: <<END
+1-D, containing `[target_height, target_width, -1]`. Provide as input to
+`tf.slice`.
+END
+ }
+ out_arg {
+ name: "bboxes"
+ description: <<END
+3-D with shape `[1, 1, 4]` containing the distorted bounding box.
+Provide as input to `tf.image.draw_bounding_boxes`.
+END
+ }
+ attr {
+ name: "seed"
+ description: <<END
+If either `seed` or `seed2` are set to non-zero, the random number
+generator is seeded by the given `seed`. Otherwise, it is seeded by a random
+seed.
+END
+ }
+ attr {
+ name: "seed2"
+ description: <<END
+A second seed to avoid seed collision.
+END
+ }
+ attr {
+ name: "min_object_covered"
+ description: <<END
+The cropped area of the image must contain at least this
+fraction of any bounding box supplied. The value of this parameter should be
+non-negative. In the case of 0, the cropped area does not need to overlap
+any of the bounding boxes supplied.
+END
+ }
+ attr {
+ name: "aspect_ratio_range"
+ description: <<END
+The cropped area of the image must have an aspect ratio =
+width / height within this range.
+END
+ }
+ attr {
+ name: "area_range"
+ description: <<END
+The cropped area of the image must contain a fraction of the
+supplied image within this range.
+END
+ }
+ attr {
+ name: "max_attempts"
+ description: <<END
+Number of attempts at generating a cropped region of the image
+that meets the specified constraints. After `max_attempts` failures, return
+the entire image.
+END
+ }
+ attr {
+ name: "use_image_if_no_bounding_boxes"
+ description: <<END
+Controls behavior if no bounding boxes are supplied.
+If true, assume an implicit bounding box covering the whole input. If false,
+raise an error.
+END
+ }
+ summary: "Generate a single randomly distorted bounding box for an image."
+ description: <<END
+Bounding box annotations are often supplied in addition to ground-truth labels
+in image recognition or object localization tasks. A common technique for
+training such a system is to randomly distort an image while preserving
+its content, i.e. *data augmentation*. This Op outputs a randomly distorted
+localization of an object, i.e. bounding box, given an `image_size`,
+`bounding_boxes` and a series of constraints.
+
+The output of this Op is a single bounding box that may be used to crop the
+original image. The output is returned as 3 tensors: `begin`, `size` and
+`bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the
+image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize
+what the bounding box looks like.
+
+Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The
+bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
+height of the underlying image.
+
+For example,
+
+```python
+ # Generate a single distorted bounding box.
+ begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(
+ tf.shape(image),
+ bounding_boxes=bounding_boxes)
+
+ # Draw the bounding box in an image summary.
+ image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
+ bbox_for_draw)
+  tf.summary.image('images_with_box', image_with_box)
+
+ # Employ the bounding box to distort the image.
+ distorted_image = tf.slice(image, begin, size)
+```
+
+Note that if no bounding box information is available, setting
+`use_image_if_no_bounding_boxes = true` will assume there is a single implicit
+bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is
+false and no bounding boxes are supplied, an error is raised.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SampleDistortedBoundingBoxV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_SampleDistortedBoundingBoxV2.pbtxt
new file mode 100644
index 0000000000..e991260972
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SampleDistortedBoundingBoxV2.pbtxt
@@ -0,0 +1,131 @@
+op {
+ graph_op_name: "SampleDistortedBoundingBoxV2"
+ in_arg {
+ name: "image_size"
+ description: <<END
+1-D, containing `[height, width, channels]`.
+END
+ }
+ in_arg {
+ name: "bounding_boxes"
+ description: <<END
+3-D with shape `[batch, N, 4]` describing the N bounding boxes
+associated with the image.
+END
+ }
+ in_arg {
+ name: "min_object_covered"
+ description: <<END
+The cropped area of the image must contain at least this
+fraction of any bounding box supplied. The value of this parameter should be
+non-negative. In the case of 0, the cropped area does not need to overlap
+any of the bounding boxes supplied.
+END
+ }
+ out_arg {
+ name: "begin"
+ description: <<END
+1-D, containing `[offset_height, offset_width, 0]`. Provide as input to
+`tf.slice`.
+END
+ }
+ out_arg {
+ name: "size"
+ description: <<END
+1-D, containing `[target_height, target_width, -1]`. Provide as input to
+`tf.slice`.
+END
+ }
+ out_arg {
+ name: "bboxes"
+ description: <<END
+3-D with shape `[1, 1, 4]` containing the distorted bounding box.
+Provide as input to `tf.image.draw_bounding_boxes`.
+END
+ }
+ attr {
+ name: "seed"
+ description: <<END
+If either `seed` or `seed2` are set to non-zero, the random number
+generator is seeded by the given `seed`. Otherwise, it is seeded by a random
+seed.
+END
+ }
+ attr {
+ name: "seed2"
+ description: <<END
+A second seed to avoid seed collision.
+END
+ }
+ attr {
+ name: "aspect_ratio_range"
+ description: <<END
+The cropped area of the image must have an aspect ratio =
+width / height within this range.
+END
+ }
+ attr {
+ name: "area_range"
+ description: <<END
+The cropped area of the image must contain a fraction of the
+supplied image within this range.
+END
+ }
+ attr {
+ name: "max_attempts"
+ description: <<END
+Number of attempts at generating a cropped region of the image
+that meets the specified constraints. After `max_attempts` failures, return
+the entire image.
+END
+ }
+ attr {
+ name: "use_image_if_no_bounding_boxes"
+ description: <<END
+Controls behavior if no bounding boxes are supplied.
+If true, assume an implicit bounding box covering the whole input. If false,
+raise an error.
+END
+ }
+ summary: "Generate a single randomly distorted bounding box for an image."
+ description: <<END
+Bounding box annotations are often supplied in addition to ground-truth labels
+in image recognition or object localization tasks. A common technique for
+training such a system is to randomly distort an image while preserving
+its content, i.e. *data augmentation*. This Op outputs a randomly distorted
+localization of an object, i.e. bounding box, given an `image_size`,
+`bounding_boxes` and a series of constraints.
+
+The output of this Op is a single bounding box that may be used to crop the
+original image. The output is returned as 3 tensors: `begin`, `size` and
+`bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the
+image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize
+what the bounding box looks like.
+
+Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The
+bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
+height of the underlying image.
+
+For example,
+
+```python
+ # Generate a single distorted bounding box.
+ begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(
+ tf.shape(image),
+ bounding_boxes=bounding_boxes)
+
+ # Draw the bounding box in an image summary.
+ image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
+ bbox_for_draw)
+  tf.summary.image('images_with_box', image_with_box)
+
+ # Employ the bounding box to distort the image.
+ distorted_image = tf.slice(image, begin, size)
+```
+
+Note that if no bounding box information is available, setting
+`use_image_if_no_bounding_boxes = true` will assume there is a single implicit
+bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is
+false and no bounding boxes are supplied, an error is raised.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Save.pbtxt b/tensorflow/core/api_def/base_api/api_def_Save.pbtxt
new file mode 100644
index 0000000000..ee75d6e4a6
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Save.pbtxt
@@ -0,0 +1,29 @@
+op {
+ graph_op_name: "Save"
+ in_arg {
+ name: "filename"
+ description: <<END
+Must have a single element. The name of the file to which we write
+the tensor.
+END
+ }
+ in_arg {
+ name: "tensor_names"
+ description: <<END
+Shape `[N]`. The names of the tensors to be saved.
+END
+ }
+ in_arg {
+ name: "data"
+ description: <<END
+`N` tensors to save.
+END
+ }
+ summary: "Saves the input tensors to disk."
+ description: <<END
+The size of `tensor_names` must match the number of tensors in `data`. `data[i]`
+is written to `filename` with name `tensor_names[i]`.
+
+See also `SaveSlices`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SaveSlices.pbtxt b/tensorflow/core/api_def/base_api/api_def_SaveSlices.pbtxt
new file mode 100644
index 0000000000..61df999b2d
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SaveSlices.pbtxt
@@ -0,0 +1,53 @@
+op {
+ graph_op_name: "SaveSlices"
+ in_arg {
+ name: "filename"
+ description: <<END
+Must have a single element. The name of the file to which we write the
+tensor.
+END
+ }
+ in_arg {
+ name: "tensor_names"
+ description: <<END
+Shape `[N]`. The names of the tensors to be saved.
+END
+ }
+ in_arg {
+ name: "shapes_and_slices"
+ description: <<END
+Shape `[N]`. The shapes and slice specifications to use when
+saving the tensors.
+END
+ }
+ in_arg {
+ name: "data"
+ description: <<END
+`N` tensors to save.
+END
+ }
+ summary: "Saves input tensors slices to disk."
+ description: <<END
+This is like `Save` except that tensors can be listed in the saved file as being
+a slice of a larger tensor. `shapes_and_slices` specifies the shape of the
+larger tensor and the slice that this tensor covers. `shapes_and_slices` must
+have as many elements as `tensor_names`.
+
+Elements of the `shapes_and_slices` input must either be:
+
+* The empty string, in which case the corresponding tensor is
+ saved normally.
+* A string of the form `dim0 dim1 ... dimN-1 slice-spec` where the
+ `dimI` are the dimensions of the larger tensor and `slice-spec`
+ specifies what part is covered by the tensor to save.
+
+`slice-spec` itself is a `:`-separated list: `slice0:slice1:...:sliceN-1`
+where each `sliceI` is either:
+
+* The string `-` meaning that the slice covers all indices of this dimension
+* `start,length` where `start` and `length` are integers. In that
+ case the slice covers `length` indices starting at `start`.
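+
+For example, the entry `10 20 -:5,5` (made-up values) describes a tensor that
+is a slice of a larger `[10, 20]` tensor, covering all of the first dimension
+and 5 indices of the second dimension starting at index 5.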
+
+See also `Save`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SaveV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_SaveV2.pbtxt
new file mode 100644
index 0000000000..ee87514f25
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SaveV2.pbtxt
@@ -0,0 +1,35 @@
+op {
+ graph_op_name: "SaveV2"
+ in_arg {
+ name: "prefix"
+ description: <<END
+Must have a single element. The prefix of the V2 checkpoint to which we
+write the tensors.
+END
+ }
+ in_arg {
+ name: "tensor_names"
+ description: <<END
+Shape `[N]`. The names of the tensors to be saved.
+END
+ }
+ in_arg {
+ name: "shape_and_slices"
+ description: <<END
+Shape `[N]`. The slice specs of the tensors to be saved.
+Empty strings indicate that they are non-partitioned tensors.
+END
+ }
+ in_arg {
+ name: "tensors"
+ description: <<END
+`N` tensors to save.
+END
+ }
+ summary: "Saves tensors in V2 checkpoint format."
+ description: <<END
+By default, saves the named tensors in full. If the caller wishes to save
+specific slices of full tensors, "shape_and_slices" should be non-empty strings
+and correspondingly well-formed.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ScalarSummary.pbtxt b/tensorflow/core/api_def/base_api/api_def_ScalarSummary.pbtxt
new file mode 100644
index 0000000000..2cedb05b71
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ScalarSummary.pbtxt
@@ -0,0 +1,26 @@
+op {
+ graph_op_name: "ScalarSummary"
+ in_arg {
+ name: "tags"
+ description: <<END
+Tags for the summary.
+END
+ }
+ in_arg {
+ name: "values"
+ description: <<END
+Same shape as `tags`. Values for the summary.
+END
+ }
+ out_arg {
+ name: "summary"
+ description: <<END
+Scalar. Serialized `Summary` protocol buffer.
+END
+ }
+ summary: "Outputs a `Summary` protocol buffer with scalar values."
+ description: <<END
+The input `tags` and `values` must have the same shape. The generated summary
+has a summary value for each tag-value pair in `tags` and `values`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ScanDataset.pbtxt b/tensorflow/core/api_def/base_api/api_def_ScanDataset.pbtxt
new file mode 100644
index 0000000000..e83d4a9e96
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ScanDataset.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "ScanDataset"
+ summary: "Creates a dataset successively reduces `f` over the elements of `input_dataset`."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ScatterAdd.pbtxt b/tensorflow/core/api_def/base_api/api_def_ScatterAdd.pbtxt
new file mode 100644
index 0000000000..4b5201f025
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ScatterAdd.pbtxt
@@ -0,0 +1,60 @@
+op {
+ graph_op_name: "ScatterAdd"
+ in_arg {
+ name: "ref"
+ description: <<END
+Should be from a `Variable` node.
+END
+ }
+ in_arg {
+ name: "indices"
+ description: <<END
+A tensor of indices into the first dimension of `ref`.
+END
+ }
+ in_arg {
+ name: "updates"
+ description: <<END
+A tensor of updated values to add to `ref`.
+END
+ }
+ out_arg {
+ name: "output_ref"
+ description: <<END
+Same as `ref`. Returned as a convenience for operations that want
+to use the updated values after the update is done.
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If True, the addition will be protected by a lock;
+otherwise the behavior is undefined, but may exhibit less contention.
+END
+ }
+ summary: "Adds sparse updates to a variable reference."
+ description: <<END
+This operation computes
+
+ # Scalar indices
+ ref[indices, ...] += updates[...]
+
+ # Vector indices (for each i)
+ ref[indices[i], ...] += updates[i, ...]
+
+ # High rank indices (for each i, ..., j)
+ ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]
+
+This operation outputs `ref` after the update is done.
+This makes it easier to chain operations that need to use the reset value.
+
+Duplicate entries are handled correctly: if multiple `indices` reference
+the same location, their contributions add.
+
+Requires `updates.shape = indices.shape + ref.shape[1:]`.
+
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="https://www.tensorflow.org/images/ScatterAdd.png" alt>
+</div>
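+
+A minimal usage sketch via the Python wrapper (hypothetical values):
+
+```python
+ref = tf.Variable([1, 2, 3, 4])
+add = tf.scatter_add(ref, [0, 2], [10, 20])
+# After running `add`, ref => [11, 2, 23, 4]
+```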
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ScatterDiv.pbtxt b/tensorflow/core/api_def/base_api/api_def_ScatterDiv.pbtxt
new file mode 100644
index 0000000000..771cf0b591
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ScatterDiv.pbtxt
@@ -0,0 +1,58 @@
+op {
+ graph_op_name: "ScatterDiv"
+ in_arg {
+ name: "ref"
+ description: <<END
+Should be from a `Variable` node.
+END
+ }
+ in_arg {
+ name: "indices"
+ description: <<END
+A tensor of indices into the first dimension of `ref`.
+END
+ }
+ in_arg {
+ name: "updates"
+ description: <<END
+A tensor of values that `ref` is divided by.
+END
+ }
+ out_arg {
+ name: "output_ref"
+ description: <<END
+Same as `ref`. Returned as a convenience for operations that want
+to use the updated values after the update is done.
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If True, the operation will be protected by a lock;
+otherwise the behavior is undefined, but may exhibit less contention.
+END
+ }
+ summary: "Divides a variable reference by sparse updates."
+ description: <<END
+This operation computes
+
+```python
+ # Scalar indices
+ ref[indices, ...] /= updates[...]
+
+ # Vector indices (for each i)
+ ref[indices[i], ...] /= updates[i, ...]
+
+ # High rank indices (for each i, ..., j)
+ ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]
+```
+
+This operation outputs `ref` after the update is done.
+This makes it easier to chain operations that need to use the reset value.
+
+Duplicate entries are handled correctly: if multiple `indices` reference
+the same location, their contributions divide.
+
+Requires `updates.shape = indices.shape + ref.shape[1:]`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ScatterMul.pbtxt b/tensorflow/core/api_def/base_api/api_def_ScatterMul.pbtxt
new file mode 100644
index 0000000000..a51f571b00
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ScatterMul.pbtxt
@@ -0,0 +1,58 @@
+op {
+ graph_op_name: "ScatterMul"
+ in_arg {
+ name: "ref"
+ description: <<END
+Should be from a `Variable` node.
+END
+ }
+ in_arg {
+ name: "indices"
+ description: <<END
+A tensor of indices into the first dimension of `ref`.
+END
+ }
+ in_arg {
+ name: "updates"
+ description: <<END
+A tensor of updated values to multiply into `ref`.
+END
+ }
+ out_arg {
+ name: "output_ref"
+ description: <<END
+Same as `ref`. Returned as a convenience for operations that want
+to use the updated values after the update is done.
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If True, the operation will be protected by a lock;
+otherwise the behavior is undefined, but may exhibit less contention.
+END
+ }
+ summary: "Multiplies sparse updates into a variable reference."
+ description: <<END
+This operation computes
+
+```python
+ # Scalar indices
+ ref[indices, ...] *= updates[...]
+
+ # Vector indices (for each i)
+ ref[indices[i], ...] *= updates[i, ...]
+
+ # High rank indices (for each i, ..., j)
+ ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]
+```
+
+This operation outputs `ref` after the update is done.
+This makes it easier to chain operations that need to use the reset value.
+
+Duplicate entries are handled correctly: if multiple `indices` reference
+the same location, their contributions multiply.
+
+Requires `updates.shape = indices.shape + ref.shape[1:]`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ScatterNd.pbtxt b/tensorflow/core/api_def/base_api/api_def_ScatterNd.pbtxt
new file mode 100644
index 0000000000..23732546ed
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ScatterNd.pbtxt
@@ -0,0 +1,102 @@
+op {
+ graph_op_name: "ScatterNd"
+ in_arg {
+ name: "indices"
+ description: <<END
+Index tensor.
+END
+ }
+ in_arg {
+ name: "updates"
+ description: <<END
+Updates to scatter into output.
+END
+ }
+ in_arg {
+ name: "shape"
+ description: <<END
+1-D. The shape of the resulting tensor.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+A new tensor with the given shape and updates applied according
+to the indices.
+END
+ }
+ summary: "Scatter `updates` into a new (initially zero) tensor according to `indices`."
+ description: <<END
+Creates a new tensor by applying sparse `updates` to individual
+values or slices within a zero tensor of the given `shape` according to
+indices. This operator is the inverse of the @{tf.gather_nd} operator which
+extracts values or slices from a given tensor.
+
+**WARNING**: The order in which updates are applied is nondeterministic, so the
+output will be nondeterministic if `indices` contains duplicates.
+
+`indices` is an integer tensor containing indices into a new tensor of shape
+`shape`. The last dimension of `indices` can be at most the rank of `shape`:
+
+ indices.shape[-1] <= shape.rank
+
+The last dimension of `indices` corresponds to indices into elements
+(if `indices.shape[-1] = shape.rank`) or slices
+(if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of
+`shape`. `updates` is a tensor with shape
+
+ indices.shape[:-1] + shape[indices.shape[-1]:]
+
+The simplest form of scatter is to insert individual elements in a tensor by
+index. For example, say we want to insert 4 scattered elements in a rank-1
+tensor with 8 elements.
+
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="https://www.tensorflow.org/images/ScatterNd1.png" alt>
+</div>
+
+In Python, this scatter operation would look like this:
+
+```python
+ indices = tf.constant([[4], [3], [1], [7]])
+ updates = tf.constant([9, 10, 11, 12])
+ shape = tf.constant([8])
+ scatter = tf.scatter_nd(indices, updates, shape)
+ with tf.Session() as sess:
+ print(sess.run(scatter))
+```
+
+The resulting tensor would look like this:
+
+ [0, 11, 0, 10, 9, 0, 0, 12]
+
+We can also insert entire slices of a higher rank tensor all at once. For
+example, we can insert two slices in the first dimension of a
+rank-3 tensor with two matrices of new values.
+
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="https://www.tensorflow.org/images/ScatterNd2.png" alt>
+</div>
+
+In Python, this scatter operation would look like this:
+
+```python
+ indices = tf.constant([[0], [2]])
+ updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
+ [7, 7, 7, 7], [8, 8, 8, 8]],
+ [[5, 5, 5, 5], [6, 6, 6, 6],
+ [7, 7, 7, 7], [8, 8, 8, 8]]])
+ shape = tf.constant([4, 4, 4])
+ scatter = tf.scatter_nd(indices, updates, shape)
+ with tf.Session() as sess:
+ print(sess.run(scatter))
+```
+
+The resulting tensor would look like this:
+
+ [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
+ [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
+ [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
+ [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ScatterNdAdd.pbtxt b/tensorflow/core/api_def/base_api/api_def_ScatterNdAdd.pbtxt
new file mode 100644
index 0000000000..b0665ebf0e
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ScatterNdAdd.pbtxt
@@ -0,0 +1,74 @@
+op {
+ graph_op_name: "ScatterNdAdd"
+ in_arg {
+ name: "ref"
+ description: <<END
+A mutable Tensor. Should be from a Variable node.
+END
+ }
+ in_arg {
+ name: "indices"
+ description: <<END
+A Tensor. Must be one of the following types: int32, int64.
+A tensor of indices into ref.
+END
+ }
+ in_arg {
+ name: "updates"
+ description: <<END
+A Tensor. Must have the same type as ref. A tensor of updated values
+to add to ref.
+END
+ }
+ out_arg {
+ name: "output_ref"
+ description: <<END
+Same as ref. Returned as a convenience for operations that want
+to use the updated values after the update is done.
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+An optional bool. Defaults to False. If True, the addition will
+be protected by a lock; otherwise the behavior is undefined,
+but may exhibit less contention.
+END
+ }
+ summary: "Applies sparse addition between `updates` and individual values or slices"
+ description: <<END
+within a given variable according to `indices`.
+
+`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
+
+`indices` must be an integer tensor, containing indices into `ref`.
+It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
+
+The innermost dimension of `indices` (with length `K`) corresponds to
+indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
+dimension of `ref`.
+
+`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
+
+```
+[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
+```
+
+For example, say we want to add 4 scattered elements to a rank-1 tensor
+with 8 elements. In Python, that addition would look like this:
+
+ ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
+ indices = tf.constant([[4], [3], [1], [7]])
+ updates = tf.constant([9, 10, 11, 12])
+ add = tf.scatter_nd_add(ref, indices, updates)
+ with tf.Session() as sess:
+    print(sess.run(add))
+
+The resulting update to ref would look like this:
+
+ [1, 13, 3, 14, 14, 6, 7, 20]
+
+See @{tf.scatter_nd} for more details about how to make updates to
+slices.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ScatterNdNonAliasingAdd.pbtxt b/tensorflow/core/api_def/base_api/api_def_ScatterNdNonAliasingAdd.pbtxt
new file mode 100644
index 0000000000..e5c64c2b90
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ScatterNdNonAliasingAdd.pbtxt
@@ -0,0 +1,68 @@
+op {
+ graph_op_name: "ScatterNdNonAliasingAdd"
+ in_arg {
+ name: "input"
+ description: <<END
+A Tensor.
+END
+ }
+ in_arg {
+ name: "indices"
+ description: <<END
+A Tensor. Must be one of the following types: `int32`, `int64`.
+A tensor of indices into `input`.
+END
+ }
+ in_arg {
+ name: "updates"
+ description: <<END
+A Tensor. Must have the same type as `input`. A tensor of updated values
+to add to `input`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+A `Tensor` with the same shape as `input`, containing values of `input`
+updated with `updates`.
+END
+ }
+ summary: "Applies sparse addition to `input` using individual values or slices"
+ description: <<END
+from `updates` according to indices `indices`. The updates are non-aliasing:
+`input` is only modified in-place if no other operations will use it.
+Otherwise, a copy of `input` is made. This operation has a gradient with
+respect to both `input` and `updates`.
+
+`input` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
+
+`indices` must be an integer tensor, containing indices into `input`.
+It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
+
+The innermost dimension of `indices` (with length `K`) corresponds to
+indices into elements (if `K = P`) or `(P-K)`-dimensional slices
+(if `K < P`) along the `K`th dimension of `input`.
+
+`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
+
+```
+[d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].
+```
+
+For example, say we want to add 4 scattered elements to a rank-1 tensor
+with 8 elements. In Python, that addition would look like this:
+
+ input = tf.constant([1, 2, 3, 4, 5, 6, 7, 8])
+ indices = tf.constant([[4], [3], [1], [7]])
+ updates = tf.constant([9, 10, 11, 12])
+ output = tf.scatter_nd_non_aliasing_add(input, indices, updates)
+ with tf.Session() as sess:
+ print(sess.run(output))
+
+The resulting value `output` would look like this:
+
+ [1, 13, 3, 14, 14, 6, 7, 20]
+
+See @{tf.scatter_nd} for more details about how to make updates to slices.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ScatterNdSub.pbtxt b/tensorflow/core/api_def/base_api/api_def_ScatterNdSub.pbtxt
new file mode 100644
index 0000000000..333db017f5
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ScatterNdSub.pbtxt
@@ -0,0 +1,74 @@
+op {
+ graph_op_name: "ScatterNdSub"
+ in_arg {
+ name: "ref"
+ description: <<END
+A mutable Tensor. Should be from a Variable node.
+END
+ }
+ in_arg {
+ name: "indices"
+ description: <<END
+A Tensor. Must be one of the following types: int32, int64.
+A tensor of indices into ref.
+END
+ }
+ in_arg {
+ name: "updates"
+ description: <<END
+A Tensor. Must have the same type as ref. A tensor of updated values
+to subtract from ref.
+END
+ }
+ out_arg {
+ name: "output_ref"
+ description: <<END
+Same as ref. Returned as a convenience for operations that want
+to use the updated values after the update is done.
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+An optional bool. Defaults to False. If True, the subtraction will
+be protected by a lock; otherwise the behavior is undefined,
+but may exhibit less contention.
+END
+ }
+ summary: "Applies sparse subtraction between `updates` and individual values or slices"
+ description: <<END
+within a given variable according to `indices`.
+
+`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
+
+`indices` must be an integer tensor, containing indices into `ref`.
+It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
+
+The innermost dimension of `indices` (with length `K`) corresponds to
+indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
+dimension of `ref`.
+
+`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
+
+```
+[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
+```
+
+For example, say we want to subtract 4 scattered elements from a rank-1 tensor
+with 8 elements. In Python, that subtraction would look like this:
+
+ ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
+ indices = tf.constant([[4], [3], [1], [7]])
+ updates = tf.constant([9, 10, 11, 12])
+ sub = tf.scatter_nd_sub(ref, indices, updates)
+ with tf.Session() as sess:
+    print(sess.run(sub))
+
+The resulting update to ref would look like this:
+
+ [1, -9, 3, -6, -4, 6, 7, -4]
+
+See @{tf.scatter_nd} for more details about how to make updates to
+slices.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ScatterNdUpdate.pbtxt b/tensorflow/core/api_def/base_api/api_def_ScatterNdUpdate.pbtxt
new file mode 100644
index 0000000000..33d98262d5
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ScatterNdUpdate.pbtxt
@@ -0,0 +1,76 @@
+op {
+ graph_op_name: "ScatterNdUpdate"
+ in_arg {
+ name: "ref"
+ description: <<END
+A mutable Tensor. Should be from a Variable node.
+END
+ }
+ in_arg {
+ name: "indices"
+ description: <<END
+A Tensor. Must be one of the following types: int32, int64.
+A tensor of indices into ref.
+END
+ }
+ in_arg {
+ name: "updates"
+ description: <<END
+A Tensor. Must have the same type as ref. A tensor of updated
+values to store in ref.
+END
+ }
+ out_arg {
+ name: "output_ref"
+ description: <<END
+Same as ref. Returned as a convenience for operations that want to
+use the updated values after the update is done.
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+An optional bool. Defaults to True. If True, the assignment will
+be protected by a lock; otherwise the behavior is undefined,
+but may exhibit less contention.
+END
+ }
+ summary: "Applies sparse `updates` to individual values or slices within a given"
+ description: <<END
+variable according to `indices`.
+
+`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
+
+`indices` must be an integer tensor, containing indices into `ref`.
+It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
+
+The innermost dimension of `indices` (with length `K`) corresponds to
+indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
+dimension of `ref`.
+
+`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
+
+```
+[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
+```
+
+For example, say we want to update 4 scattered elements of a rank-1 tensor
+with 8 elements. In Python, that update would look like this:
+
+```python
+ ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
+ indices = tf.constant([[4], [3], [1] ,[7]])
+ updates = tf.constant([9, 10, 11, 12])
+ update = tf.scatter_nd_update(ref, indices, updates)
+ with tf.Session() as sess:
+    print(sess.run(update))
+```
+
+The resulting update to ref would look like this:
+
+ [1, 11, 3, 10, 9, 6, 7, 12]
+
+See @{tf.scatter_nd} for more details about how to make updates to
+slices.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ScatterSub.pbtxt b/tensorflow/core/api_def/base_api/api_def_ScatterSub.pbtxt
new file mode 100644
index 0000000000..c0d3a4a133
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ScatterSub.pbtxt
@@ -0,0 +1,60 @@
+op {
+ graph_op_name: "ScatterSub"
+ in_arg {
+ name: "ref"
+ description: <<END
+Should be from a `Variable` node.
+END
+ }
+ in_arg {
+ name: "indices"
+ description: <<END
+A tensor of indices into the first dimension of `ref`.
+END
+ }
+ in_arg {
+ name: "updates"
+ description: <<END
+A tensor of updated values to subtract from `ref`.
+END
+ }
+ out_arg {
+ name: "output_ref"
+ description: <<END
+Same as `ref`. Returned as a convenience for operations that want
+to use the updated values after the update is done.
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If True, the subtraction will be protected by a lock;
+otherwise the behavior is undefined, but may exhibit less contention.
+END
+ }
+ summary: "Subtracts sparse updates to a variable reference."
+ description: <<END
+```python
+ # Scalar indices
+ ref[indices, ...] -= updates[...]
+
+ # Vector indices (for each i)
+ ref[indices[i], ...] -= updates[i, ...]
+
+ # High rank indices (for each i, ..., j)
+ ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]
+```
+
+This operation outputs `ref` after the update is done.
+This makes it easier to chain operations that need to use the reset value.
+
+Duplicate entries are handled correctly: if multiple `indices` reference
+the same location, their (negated) contributions add.
+
+Requires `updates.shape = indices.shape + ref.shape[1:]`.
+
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="https://www.tensorflow.org/images/ScatterSub.png" alt>
+</div>
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ScatterUpdate.pbtxt b/tensorflow/core/api_def/base_api/api_def_ScatterUpdate.pbtxt
new file mode 100644
index 0000000000..c44dbbd233
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ScatterUpdate.pbtxt
@@ -0,0 +1,63 @@
+op {
+ graph_op_name: "ScatterUpdate"
+ in_arg {
+ name: "ref"
+ description: <<END
+Should be from a `Variable` node.
+END
+ }
+ in_arg {
+ name: "indices"
+ description: <<END
+A tensor of indices into the first dimension of `ref`.
+END
+ }
+ in_arg {
+ name: "updates"
+ description: <<END
+A tensor of updated values to store in `ref`.
+END
+ }
+ out_arg {
+ name: "output_ref"
+ description: <<END
+Same as `ref`. Returned as a convenience for operations that want
+to use the updated values after the update is done.
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If True, the assignment will be protected by a lock;
+otherwise the behavior is undefined, but may exhibit less contention.
+END
+ }
+ summary: "Applies sparse updates to a variable reference."
+ description: <<END
+This operation computes
+
+```python
+ # Scalar indices
+ ref[indices, ...] = updates[...]
+
+ # Vector indices (for each i)
+ ref[indices[i], ...] = updates[i, ...]
+
+ # High rank indices (for each i, ..., j)
+ ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
+```
+
+This operation outputs `ref` after the update is done.
+This makes it easier to chain operations that need to use the reset value.
+
+If values in `ref` are to be updated more than once, because there are
+duplicate entries in `indices`, the order in which the updates happen
+for each value is undefined.
+
+Requires `updates.shape = indices.shape + ref.shape[1:]`.
+
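+For example (a sketch, assuming the `tf.scatter_update` Python endpoint):
+
+```python
+ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
+indices = tf.constant([0, 2])
+updates = tf.constant([20, 30])
+update = tf.scatter_update(ref, indices, updates)
+with tf.Session() as sess:
+    sess.run(tf.global_variables_initializer())
+    print(sess.run(update))  # => [20, 2, 30, 4, 5, 6, 7, 8]
+```
+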
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="https://www.tensorflow.org/images/ScatterUpdate.png" alt>
+</div>
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SdcaFprint.pbtxt b/tensorflow/core/api_def/base_api/api_def_SdcaFprint.pbtxt
new file mode 100644
index 0000000000..829840d04a
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SdcaFprint.pbtxt
@@ -0,0 +1,17 @@
+op {
+ graph_op_name: "SdcaFprint"
+ in_arg {
+ name: "input"
+ description: <<END
+vector of strings to compute fingerprints on.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+an `(N, 2)`-shaped matrix where `N` is the number of elements in the input
+vector. Each row contains the low and high parts of the fingerprint.
+END
+ }
+ summary: "Computes fingerprints of the input strings."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SdcaOptimizer.pbtxt b/tensorflow/core/api_def/base_api/api_def_SdcaOptimizer.pbtxt
new file mode 100644
index 0000000000..b0b58ac00e
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SdcaOptimizer.pbtxt
@@ -0,0 +1,167 @@
+op {
+ graph_op_name: "SdcaOptimizer"
+ in_arg {
+ name: "sparse_example_indices"
+ description: <<END
+a list of vectors which contain example indices.
+END
+ }
+ in_arg {
+ name: "sparse_feature_indices"
+ description: <<END
+a list of vectors which contain feature indices.
+END
+ }
+ in_arg {
+ name: "sparse_feature_values"
+ description: <<END
+a list of vectors which contain the feature values
+associated with each feature group.
+END
+ }
+ in_arg {
+ name: "dense_features"
+ description: <<END
+a list of matrices which contain the dense feature values.
+END
+ }
+ in_arg {
+ name: "example_weights"
+ description: <<END
+a vector which contains the weight associated with each
+example.
+END
+ }
+ in_arg {
+ name: "example_labels"
+ description: <<END
+a vector which contains the label/target associated with each
+example.
+END
+ }
+ in_arg {
+ name: "sparse_indices"
+ description: <<END
+a list of vectors where each value is the index which has a
+corresponding weight in sparse_weights. This field may be omitted for the
+dense approach.
+END
+ }
+ in_arg {
+ name: "sparse_weights"
+ description: <<END
+a list of vectors where each value is the weight associated with
+a sparse feature group.
+END
+ }
+ in_arg {
+ name: "dense_weights"
+ description: <<END
+a list of vectors where the values are the weights associated
+with a dense feature group.
+END
+ }
+ in_arg {
+ name: "example_state_data"
+ description: <<END
+a list of vectors containing the example state data.
+END
+ }
+ out_arg {
+ name: "out_example_state_data"
+ description: <<END
+a list of vectors containing the updated example state
+data.
+END
+ }
+ out_arg {
+ name: "out_delta_sparse_weights"
+ description: <<END
+a list of vectors where each value is the delta
+weights associated with a sparse feature group.
+END
+ }
+ out_arg {
+ name: "out_delta_dense_weights"
+ description: <<END
+a list of vectors where the values are the delta
+weights associated with a dense feature group.
+END
+ }
+ attr {
+ name: "loss_type"
+ description: <<END
+Type of the primal loss. Currently SdcaSolver supports logistic,
+squared and hinge losses.
+END
+ }
+ attr {
+ name: "adaptative"
+ description: <<END
+Whether to use Adaptive SDCA for the inner loop.
+END
+ }
+ attr {
+ name: "num_sparse_features"
+ description: <<END
+Number of sparse feature groups to train on.
+END
+ }
+ attr {
+ name: "num_sparse_features_with_values"
+ description: <<END
+Number of sparse feature groups with values
+associated with them; otherwise values are implicitly treated as 1.0.
+END
+ }
+ attr {
+ name: "num_dense_features"
+ description: <<END
+Number of dense feature groups to train on.
+END
+ }
+ attr {
+ name: "l1"
+ description: <<END
+Symmetric l1 regularization strength.
+END
+ }
+ attr {
+ name: "l2"
+ description: <<END
+Symmetric l2 regularization strength.
+END
+ }
+ attr {
+ name: "num_loss_partitions"
+ description: <<END
+Number of partitions of the global loss function.
+END
+ }
+ attr {
+ name: "num_inner_iterations"
+ description: <<END
+Number of iterations per mini-batch.
+END
+ }
+ summary: "Distributed version of Stochastic Dual Coordinate Ascent (SDCA) optimizer for"
+ description: <<END
+linear models with L1 + L2 regularization. As the global optimization objective
+is strongly convex, the optimizer optimizes the dual objective at each step. The
+optimizer applies each update one example at a time. Examples are sampled
+uniformly, the optimizer is learning-rate free, and it enjoys a linear
+convergence rate.
+
+[Proximal Stochastic Dual Coordinate Ascent](http://arxiv.org/pdf/1211.2717v1.pdf).<br>
+Shai Shalev-Shwartz, Tong Zhang. 2012
+
+$$\text{Loss Objective} = \sum_{i} f_{i}(wx_{i}) + \frac{l_2}{2} \|w\|^2 + l_1 \|w\|_1$$
+
+[Adding vs. Averaging in Distributed Primal-Dual Optimization](http://arxiv.org/abs/1502.03508).<br>
+Chenxin Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan,
+Peter Richtarik, Martin Takac. 2015
+
+[Stochastic Dual Coordinate Ascent with Adaptive Probabilities](https://arxiv.org/abs/1502.08053).<br>
+Dominik Csiba, Zheng Qu, Peter Richtarik. 2015
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SdcaShrinkL1.pbtxt b/tensorflow/core/api_def/base_api/api_def_SdcaShrinkL1.pbtxt
new file mode 100644
index 0000000000..8e723c169d
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SdcaShrinkL1.pbtxt
@@ -0,0 +1,29 @@
+op {
+ graph_op_name: "SdcaShrinkL1"
+ in_arg {
+ name: "weights"
+ description: <<END
+a list of vectors where each value is the weight associated with a
+feature group.
+END
+ }
+ attr {
+ name: "num_features"
+ description: <<END
+Number of feature groups to apply the shrinking step to.
+END
+ }
+ attr {
+ name: "l1"
+ description: <<END
+Symmetric l1 regularization strength.
+END
+ }
+ attr {
+ name: "l2"
+ description: <<END
+Symmetric l2 regularization strength. Should be a positive float.
+END
+ }
+ summary: "Applies L1 regularization shrink step on the parameters."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SegmentMax.pbtxt b/tensorflow/core/api_def/base_api/api_def_SegmentMax.pbtxt
new file mode 100644
index 0000000000..db890cb2f5
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SegmentMax.pbtxt
@@ -0,0 +1,32 @@
+op {
+ graph_op_name: "SegmentMax"
+ in_arg {
+ name: "segment_ids"
+ description: <<END
+A 1-D tensor whose size is equal to the size of `data`'s
+first dimension. Values should be sorted and can be repeated.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Has same shape as data, except for dimension 0 which
+has size `k`, the number of segments.
+END
+ }
+ summary: "Computes the maximum along segments of a tensor."
+ description: <<END
+Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
+segments.
+
+Computes a tensor such that
+\\(output_i = \max_j(data_j)\\) where `max` is over `j` such
+that `segment_ids[j] == i`.
+
+If the max is empty for a given segment ID `i`, `output[i] = 0`.
+
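+For example (a sketch, assuming the `tf.segment_max` Python endpoint):
+
+```python
+c = tf.constant([[1, 2, 3, 4], [4, 3, 2, 1], [5, 6, 7, 8]])
+tf.segment_max(c, tf.constant([0, 0, 1]))  # => [[4, 3, 3, 4], [5, 6, 7, 8]]
+```
+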
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="https://www.tensorflow.org/images/SegmentMax.png" alt>
+</div>
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SegmentMean.pbtxt b/tensorflow/core/api_def/base_api/api_def_SegmentMean.pbtxt
new file mode 100644
index 0000000000..4713c52310
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SegmentMean.pbtxt
@@ -0,0 +1,33 @@
+op {
+ graph_op_name: "SegmentMean"
+ in_arg {
+ name: "segment_ids"
+ description: <<END
+A 1-D tensor whose size is equal to the size of `data`'s
+first dimension. Values should be sorted and can be repeated.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Has same shape as data, except for dimension 0 which
+has size `k`, the number of segments.
+END
+ }
+ summary: "Computes the mean along segments of a tensor."
+ description: <<END
+Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
+segments.
+
+Computes a tensor such that
+\\(output_i = \frac{\sum_j data_j}{N}\\) where the sum is
+over `j` such that `segment_ids[j] == i` and `N` is the total number of
+values summed.
+
+If the mean is empty for a given segment ID `i`, `output[i] = 0`.
+
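+For example (a sketch, assuming the `tf.segment_mean` Python endpoint):
+
+```python
+c = tf.constant([[1.0, 2.0, 3.0, 4.0],
+                 [4.0, 3.0, 2.0, 1.0],
+                 [5.0, 6.0, 7.0, 8.0]])
+tf.segment_mean(c, tf.constant([0, 0, 1]))
+# => [[2.5, 2.5, 2.5, 2.5], [5.0, 6.0, 7.0, 8.0]]
+```
+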
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="https://www.tensorflow.org/images/SegmentMean.png" alt>
+</div>
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SegmentMin.pbtxt b/tensorflow/core/api_def/base_api/api_def_SegmentMin.pbtxt
new file mode 100644
index 0000000000..6316bfd1a5
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SegmentMin.pbtxt
@@ -0,0 +1,32 @@
+op {
+ graph_op_name: "SegmentMin"
+ in_arg {
+ name: "segment_ids"
+ description: <<END
+A 1-D tensor whose size is equal to the size of `data`'s
+first dimension. Values should be sorted and can be repeated.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Has same shape as data, except for dimension 0 which
+has size `k`, the number of segments.
+END
+ }
+ summary: "Computes the minimum along segments of a tensor."
+ description: <<END
+Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
+segments.
+
+Computes a tensor such that
+\\(output_i = \min_j(data_j)\\) where `min` is over `j` such
+that `segment_ids[j] == i`.
+
+If the min is empty for a given segment ID `i`, `output[i] = 0`.
+
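+For example (a sketch, assuming the `tf.segment_min` Python endpoint):
+
+```python
+c = tf.constant([[1, 2, 3, 4], [4, 3, 2, 1], [5, 6, 7, 8]])
+tf.segment_min(c, tf.constant([0, 0, 1]))  # => [[1, 2, 2, 1], [5, 6, 7, 8]]
+```
+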
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="https://www.tensorflow.org/images/SegmentMin.png" alt>
+</div>
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SegmentProd.pbtxt b/tensorflow/core/api_def/base_api/api_def_SegmentProd.pbtxt
new file mode 100644
index 0000000000..a16d03d467
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SegmentProd.pbtxt
@@ -0,0 +1,32 @@
+op {
+ graph_op_name: "SegmentProd"
+ in_arg {
+ name: "segment_ids"
+ description: <<END
+A 1-D tensor whose size is equal to the size of `data`'s
+first dimension. Values should be sorted and can be repeated.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Has same shape as data, except for dimension 0 which
+has size `k`, the number of segments.
+END
+ }
+ summary: "Computes the product along segments of a tensor."
+ description: <<END
+Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
+segments.
+
+Computes a tensor such that
+\\(output_i = \prod_j data_j\\) where the product is over `j` such
+that `segment_ids[j] == i`.
+
+If the product is empty for a given segment ID `i`, `output[i] = 1`.
+
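+For example (a sketch, assuming the `tf.segment_prod` Python endpoint):
+
+```python
+c = tf.constant([[1, 2, 3, 4], [4, 3, 2, 1], [5, 6, 7, 8]])
+tf.segment_prod(c, tf.constant([0, 0, 1]))  # => [[4, 6, 6, 4], [5, 6, 7, 8]]
+```
+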
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="https://www.tensorflow.org/images/SegmentProd.png" alt>
+</div>
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SegmentSum.pbtxt b/tensorflow/core/api_def/base_api/api_def_SegmentSum.pbtxt
new file mode 100644
index 0000000000..0686e17f9b
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SegmentSum.pbtxt
@@ -0,0 +1,32 @@
+op {
+ graph_op_name: "SegmentSum"
+ in_arg {
+ name: "segment_ids"
+ description: <<END
+A 1-D tensor whose size is equal to the size of `data`'s
+first dimension. Values should be sorted and can be repeated.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Has same shape as data, except for dimension 0 which
+has size `k`, the number of segments.
+END
+ }
+ summary: "Computes the sum along segments of a tensor."
+ description: <<END
+Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
+segments.
+
+Computes a tensor such that
+\\(output_i = \sum_j data_j\\) where sum is over `j` such
+that `segment_ids[j] == i`.
+
+If the sum is empty for a given segment ID `i`, `output[i] = 0`.
+
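+For example (a sketch, assuming the `tf.segment_sum` Python endpoint):
+
+```python
+c = tf.constant([[1, 2, 3, 4], [4, 3, 2, 1], [5, 6, 7, 8]])
+tf.segment_sum(c, tf.constant([0, 0, 1]))  # => [[5, 5, 5, 5], [5, 6, 7, 8]]
+```
+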
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="https://www.tensorflow.org/images/SegmentSum.png" alt>
+</div>
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Select.pbtxt b/tensorflow/core/api_def/base_api/api_def_Select.pbtxt
new file mode 100644
index 0000000000..456ea8c01e
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Select.pbtxt
@@ -0,0 +1,69 @@
+op {
+ graph_op_name: "Select"
+ endpoint {
+ name: "Where3"
+ }
+ in_arg {
+ name: "t"
+ rename_to: "x"
+ description: <<END
+A `Tensor` which may have the same shape as `condition`.
+If `condition` is rank 1, `t` may have higher rank,
+but its first dimension must match the size of `condition`.
+END
+ }
+ in_arg {
+ name: "e"
+ rename_to: "y"
+ description: <<END
+A `Tensor` with the same type and shape as `t`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+A `Tensor` with the same type and shape as `t` and `e`.
+END
+ }
+ summary: "Selects elements from `t` or `e`, depending on `condition`."
+ description: <<END
+The `t` and `e` tensors must have the same shape, and the
+output will also have that shape.
+
+The `condition` tensor must be a scalar if `t` and `e` are scalars.
+If `t` and `e` are vectors or higher rank, then `condition` must be either a
+scalar, a vector with size matching the first dimension of `t`, or must have
+the same shape as `t`.
+
+The `condition` tensor acts as a mask that chooses, based on the value at each
+element, whether the corresponding element / row in the output should be
+taken from `t` (if true) or `e` (if false).
+
+If `condition` is a vector and `t` and `e` are higher rank matrices, then
+it chooses which row (outer dimension) to copy from `t` and `e`.
+If `condition` has the same shape as `t` and `e`, then it chooses which
+element to copy from `t` and `e`.
+
+For example:
+
+```python
+# 'condition' tensor is [[True, False]
+# [False, True]]
+# 't' is [[1, 2],
+# [3, 4]]
+# 'e' is [[5, 6],
+# [7, 8]]
+select(condition, t, e) # => [[1, 6], [7, 4]]
+
+
+# 'condition' tensor is [True, False]
+# 't' is [[1, 2],
+# [3, 4]]
+# 'e' is [[5, 6],
+# [7, 8]]
+select(condition, t, e) ==> [[1, 2],
+ [7, 8]]
+
+```
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SelfAdjointEig.pbtxt b/tensorflow/core/api_def/base_api/api_def_SelfAdjointEig.pbtxt
new file mode 100644
index 0000000000..51d63eeb56
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SelfAdjointEig.pbtxt
@@ -0,0 +1,24 @@
+op {
+ graph_op_name: "SelfAdjointEig"
+ in_arg {
+ name: "input"
+ description: <<END
+Shape is `[..., M, M]`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Shape is `[..., M+1, M]`.
+END
+ }
+ summary: "Computes the Eigen Decomposition of a batch of square self-adjoint matrices."
+ description: <<END
+The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
+form square matrices, with the same constraints as the single matrix
+SelfAdjointEig.
+
+The result is a `[..., M+1, M]` matrix with `[..., 0, :]` containing the
+eigenvalues, and subsequent `[..., 1:, :]` containing the eigenvectors.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SelfAdjointEigV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_SelfAdjointEigV2.pbtxt
new file mode 100644
index 0000000000..4a5e125258
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SelfAdjointEigV2.pbtxt
@@ -0,0 +1,44 @@
+op {
+ graph_op_name: "SelfAdjointEigV2"
+ endpoint {
+ name: "SelfAdjointEig"
+ }
+ in_arg {
+ name: "input"
+ description: <<END
+`Tensor` input of shape `[N, N]`.
+END
+ }
+ out_arg {
+ name: "e"
+ description: <<END
+Eigenvalues. Shape is `[N]`.
+END
+ }
+ out_arg {
+ name: "v"
+ description: <<END
+Eigenvectors. Shape is `[N, N]`.
+END
+ }
+ attr {
+ name: "compute_v"
+ description: <<END
+If `True` then eigenvectors will be computed and returned in `v`.
+Otherwise, only the eigenvalues will be computed.
+END
+ }
+ summary: "Computes the eigen decomposition of one or more square self-adjoint matrices."
+ description: <<END
+Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in
+`input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`.
+
+```python
+# a is a tensor.
+# e is a tensor of eigenvalues.
+# v is a tensor of eigenvectors.
+e, v = self_adjoint_eig(a)
+e = self_adjoint_eig(a, compute_v=False)
+```
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Selu.pbtxt b/tensorflow/core/api_def/base_api/api_def_Selu.pbtxt
new file mode 100644
index 0000000000..cbe76de415
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Selu.pbtxt
@@ -0,0 +1,9 @@
+op {
+ graph_op_name: "Selu"
+ summary: "Computes scaled exponential linear: `scale * alpha * (exp(features) - 1)`"
+ description: <<END
+if `features < 0`, `scale * features` otherwise.
+
+See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
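+
+A NumPy sketch of the formula; the constants below are quoted from the paper,
+not from this op definition:
+
+```python
+import numpy as np
+scale, alpha = 1.0507, 1.6733
+x = np.array([-1.0, 0.0, 1.0])
+selu = np.where(x < 0, scale * alpha * (np.exp(x) - 1.0), scale * x)
+# => approximately [-1.111, 0.0, 1.051]
+```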
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SeluGrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_SeluGrad.pbtxt
new file mode 100644
index 0000000000..b5180b73d2
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SeluGrad.pbtxt
@@ -0,0 +1,24 @@
+op {
+ graph_op_name: "SeluGrad"
+ visibility: HIDDEN
+ in_arg {
+ name: "gradients"
+ description: <<END
+The backpropagated gradients to the corresponding Selu operation.
+END
+ }
+ in_arg {
+ name: "outputs"
+ description: <<END
+The outputs of the corresponding Selu operation.
+END
+ }
+ out_arg {
+ name: "backprops"
+ description: <<END
+The gradients: `gradients * (outputs + scale * alpha)`
+if outputs < 0, `scale * gradients` otherwise.
+END
+ }
+ summary: "Computes gradients for the scaled exponential linear (Selu) operation."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SerializeIterator.pbtxt b/tensorflow/core/api_def/base_api/api_def_SerializeIterator.pbtxt
new file mode 100644
index 0000000000..e24b122006
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SerializeIterator.pbtxt
@@ -0,0 +1,17 @@
+op {
+ graph_op_name: "SerializeIterator"
+ in_arg {
+ name: "resource_handle"
+ description: <<END
+A handle to an iterator resource.
+END
+ }
+ out_arg {
+ name: "serialized"
+ description: <<END
+A variant tensor storing the state of the iterator contained in the
+resource.
+END
+ }
+ summary: "Converts the given `resource_handle` representing an iterator to a variant tensor."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SerializeManySparse.pbtxt b/tensorflow/core/api_def/base_api/api_def_SerializeManySparse.pbtxt
new file mode 100644
index 0000000000..0010bca0b0
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SerializeManySparse.pbtxt
@@ -0,0 +1,31 @@
+op {
+ graph_op_name: "SerializeManySparse"
+ in_arg {
+ name: "sparse_indices"
+ description: <<END
+2-D. The `indices` of the minibatch `SparseTensor`.
+END
+ }
+ in_arg {
+ name: "sparse_values"
+ description: <<END
+1-D. The `values` of the minibatch `SparseTensor`.
+END
+ }
+ in_arg {
+ name: "sparse_shape"
+ description: <<END
+1-D. The `shape` of the minibatch `SparseTensor`.
+END
+ }
+ summary: "Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` string `Tensor`."
+ description: <<END
+The `SparseTensor` must have rank `R` greater than 1, and the first dimension
+is treated as the minibatch dimension. Elements of the `SparseTensor`
+must be sorted in increasing order of this first dimension. The serialized
+`SparseTensor` objects going into each row of `serialized_sparse` will have
+rank `R-1`.
+
+The minibatch size `N` is extracted from `sparse_shape[0]`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SerializeSparse.pbtxt b/tensorflow/core/api_def/base_api/api_def_SerializeSparse.pbtxt
new file mode 100644
index 0000000000..bb4a352d48
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SerializeSparse.pbtxt
@@ -0,0 +1,22 @@
+op {
+ graph_op_name: "SerializeSparse"
+ in_arg {
+ name: "sparse_indices"
+ description: <<END
+2-D. The `indices` of the `SparseTensor`.
+END
+ }
+ in_arg {
+ name: "sparse_values"
+ description: <<END
+1-D. The `values` of the `SparseTensor`.
+END
+ }
+ in_arg {
+ name: "sparse_shape"
+ description: <<END
+1-D. The `shape` of the `SparseTensor`.
+END
+ }
+ summary: "Serialize a `SparseTensor` into a string 3-vector (1-D `Tensor`) object."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SerializeTensor.pbtxt b/tensorflow/core/api_def/base_api/api_def_SerializeTensor.pbtxt
new file mode 100644
index 0000000000..48f7ba7aa1
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SerializeTensor.pbtxt
@@ -0,0 +1,22 @@
+op {
+ graph_op_name: "SerializeTensor"
+ in_arg {
+ name: "tensor"
+ description: <<END
+A Tensor of type `T`.
+END
+ }
+ out_arg {
+ name: "serialized"
+ description: <<END
+A serialized TensorProto proto of the input tensor.
+END
+ }
+ attr {
+ name: "T"
+ description: <<END
+The type of the input tensor.
+END
+ }
+ summary: "Transforms a Tensor into a serialized TensorProto proto."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SetSize.pbtxt b/tensorflow/core/api_def/base_api/api_def_SetSize.pbtxt
new file mode 100644
index 0000000000..812537412e
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SetSize.pbtxt
@@ -0,0 +1,38 @@
+op {
+ graph_op_name: "SetSize"
+ in_arg {
+ name: "set_indices"
+ description: <<END
+2D `Tensor`, indices of a `SparseTensor`.
+END
+ }
+ in_arg {
+ name: "set_values"
+ description: <<END
+1D `Tensor`, values of a `SparseTensor`.
+END
+ }
+ in_arg {
+ name: "set_shape"
+ description: <<END
+1D `Tensor`, shape of a `SparseTensor`.
+END
+ }
+ out_arg {
+ name: "size"
+ description: <<END
+For `set` ranked `n`, this is a `Tensor` with rank `n-1`, and the same first
+`n-1` dimensions as `set`. Each value is the number of unique elements in
+the corresponding `[0...n-1]` dimension of `set`.
+END
+ }
+ summary: "Number of unique elements along last dimension of input `set`."
+ description: <<END
+Input `set` is a `SparseTensor` represented by `set_indices`, `set_values`,
+and `set_shape`. The last dimension contains values in a set; duplicates are
+allowed but ignored.
+
+If `validate_indices` is `True`, this op validates the order and range of `set`
+indices.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Shape.pbtxt b/tensorflow/core/api_def/base_api/api_def_Shape.pbtxt
new file mode 100644
index 0000000000..4efb5384e0
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Shape.pbtxt
@@ -0,0 +1,14 @@
+op {
+ graph_op_name: "Shape"
+ summary: "Returns the shape of a tensor."
+ description: <<END
+This operation returns a 1-D integer tensor representing the shape of `input`.
+
+For example:
+
+```
+# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
+shape(t) ==> [2, 2, 3]
+```
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ShapeN.pbtxt b/tensorflow/core/api_def/base_api/api_def_ShapeN.pbtxt
new file mode 100644
index 0000000000..aa38320f9b
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ShapeN.pbtxt
@@ -0,0 +1,7 @@
+op {
+ graph_op_name: "ShapeN"
+ summary: "Returns shape of tensors."
+ description: <<END
+This operation returns N 1-D integer tensors representing the shapes of `input[i]`.
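+
+For example (a sketch, assuming the `tf.shape_n` Python endpoint):
+
+```python
+a = tf.constant([[1, 2], [3, 4]])
+b = tf.constant([1, 2, 3])
+tf.shape_n([a, b])  # => [[2, 2], [3]]
+```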
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ShardedFilename.pbtxt b/tensorflow/core/api_def/base_api/api_def_ShardedFilename.pbtxt
new file mode 100644
index 0000000000..11d1352918
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ShardedFilename.pbtxt
@@ -0,0 +1,7 @@
+op {
+ graph_op_name: "ShardedFilename"
+ summary: "Generate a sharded filename. The filename is printf formatted as"
+ description: <<END
+ %s-%05d-of-%05d, basename, shard, num_shards.
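+
+A plain-Python illustration of the printf pattern (not the op itself):
+
+```python
+"%s-%05d-of-%05d" % ("train", 2, 100)  # => 'train-00002-of-00100'
+```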
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ShardedFilespec.pbtxt b/tensorflow/core/api_def/base_api/api_def_ShardedFilespec.pbtxt
new file mode 100644
index 0000000000..ecf0a091e2
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ShardedFilespec.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "ShardedFilespec"
+ summary: "Generate a glob pattern matching all sharded file names."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ShuffleDataset.pbtxt b/tensorflow/core/api_def/base_api/api_def_ShuffleDataset.pbtxt
new file mode 100644
index 0000000000..b12d3af9d7
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ShuffleDataset.pbtxt
@@ -0,0 +1,36 @@
+op {
+ graph_op_name: "ShuffleDataset"
+ in_arg {
+ name: "buffer_size"
+ description: <<END
+The number of output elements to buffer in an iterator over
+this dataset. Compare with the `min_after_dequeue` attr when creating a
+`RandomShuffleQueue`.
+END
+ }
+ in_arg {
+ name: "seed"
+ description: <<END
+A scalar seed for the random number generator. If either seed or
+seed2 is set to be non-zero, the random number generator is seeded
+by the given seed. Otherwise, a random seed is used.
+END
+ }
+ in_arg {
+ name: "seed2"
+ description: <<END
+A second scalar seed to avoid seed collision.
+END
+ }
+ attr {
+ name: "reshuffle_each_iteration"
+ description: <<END
+If true, each iterator over this dataset will be given
+a different pseudorandomly generated seed, based on a sequence seeded by the
+`seed` and `seed2` inputs. If false, each iterator will be given the same
+seed, and repeated iteration over this dataset will yield the exact same
+sequence of results.
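+
+A sketch, assuming the `tf.data.Dataset.shuffle` wrapper that exposes this op:
+
+```python
+dataset = tf.data.Dataset.range(10)
+# With reshuffle_each_iteration=False, every iterator yields the same order.
+dataset = dataset.shuffle(buffer_size=5, seed=1, reshuffle_each_iteration=False)
+```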
+END
+ }
+ summary: "Creates a dataset that shuffles elements from `input_dataset` pseudorandomly."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Sigmoid.pbtxt b/tensorflow/core/api_def/base_api/api_def_Sigmoid.pbtxt
new file mode 100644
index 0000000000..300ab0cde6
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Sigmoid.pbtxt
@@ -0,0 +1,7 @@
+op {
+ graph_op_name: "Sigmoid"
+ summary: "Computes sigmoid of `x` element-wise."
+ description: <<END
+Specifically, `y = 1 / (1 + exp(-x))`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SigmoidGrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_SigmoidGrad.pbtxt
new file mode 100644
index 0000000000..911d6c5eee
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SigmoidGrad.pbtxt
@@ -0,0 +1,9 @@
+op {
+ graph_op_name: "SigmoidGrad"
+ visibility: HIDDEN
+ summary: "Computes the gradient of the sigmoid of `x` wrt its input."
+ description: <<END
+Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and
+`dy` is the corresponding input gradient.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Sign.pbtxt b/tensorflow/core/api_def/base_api/api_def_Sign.pbtxt
new file mode 100644
index 0000000000..4eb4be1a75
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Sign.pbtxt
@@ -0,0 +1,9 @@
+op {
+ graph_op_name: "Sign"
+ summary: "Returns an element-wise indication of the sign of a number."
+ description: <<END
+`y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`.
+
+For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Sin.pbtxt b/tensorflow/core/api_def/base_api/api_def_Sin.pbtxt
new file mode 100644
index 0000000000..f4edefb66d
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Sin.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Sin"
+ summary: "Computes sin of x element-wise."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Sinh.pbtxt b/tensorflow/core/api_def/base_api/api_def_Sinh.pbtxt
new file mode 100644
index 0000000000..a6784e8a59
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Sinh.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Sinh"
+ summary: "Computes hyperbolic sine of x element-wise."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Size.pbtxt b/tensorflow/core/api_def/base_api/api_def_Size.pbtxt
new file mode 100644
index 0000000000..6e6cb33085
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Size.pbtxt
@@ -0,0 +1,15 @@
+op {
+ graph_op_name: "Size"
+ summary: "Returns the size of a tensor."
+ description: <<END
+This operation returns an integer representing the number of elements in
+`input`.
+
+For example:
+
+```
+# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
+size(t) ==> 12
+```
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SkipDataset.pbtxt b/tensorflow/core/api_def/base_api/api_def_SkipDataset.pbtxt
new file mode 100644
index 0000000000..44e5bac79b
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SkipDataset.pbtxt
@@ -0,0 +1,11 @@
+op {
+ graph_op_name: "SkipDataset"
+ in_arg {
+ name: "count"
+ description: <<END
+A scalar representing the number of elements from the `input_dataset`
+that should be skipped. If count is -1, skips everything.
+END
+ }
+ summary: "Creates a dataset that skips `count` elements from the `input_dataset`."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Skipgram.pbtxt b/tensorflow/core/api_def/base_api/api_def_Skipgram.pbtxt
new file mode 100644
index 0000000000..d682954017
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Skipgram.pbtxt
@@ -0,0 +1,78 @@
+op {
+ graph_op_name: "Skipgram"
+ out_arg {
+ name: "vocab_word"
+ description: <<END
+A vector of words in the corpus.
+END
+ }
+ out_arg {
+ name: "vocab_freq"
+ description: <<END
+Frequencies of words. Sorted in non-ascending order.
+END
+ }
+ out_arg {
+ name: "words_per_epoch"
+ description: <<END
+Number of words per epoch in the data file.
+END
+ }
+ out_arg {
+ name: "current_epoch"
+ description: <<END
+The current epoch number.
+END
+ }
+ out_arg {
+ name: "total_words_processed"
+ description: <<END
+The total number of words processed so far.
+END
+ }
+ out_arg {
+ name: "examples"
+ description: <<END
+A vector of word ids.
+END
+ }
+ out_arg {
+ name: "labels"
+ description: <<END
+A vector of word ids.
+END
+ }
+ attr {
+ name: "filename"
+ description: <<END
+The corpus's text file name.
+END
+ }
+ attr {
+ name: "batch_size"
+ description: <<END
+The size of the produced batch.
+END
+ }
+ attr {
+ name: "window_size"
+ description: <<END
+The number of words to predict to the left and right of the target.
+END
+ }
+ attr {
+ name: "min_count"
+ description: <<END
+The minimum number of word occurrences for it to be included in the
+vocabulary.
+END
+ }
+ attr {
+ name: "subsample"
+ description: <<END
+Threshold for word occurrence. Words that appear with higher
+frequency will be randomly down-sampled. Set to 0 to disable.
+END
+ }
+ summary: "Parses a text file and creates a batch of examples."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Slice.pbtxt b/tensorflow/core/api_def/base_api/api_def_Slice.pbtxt
new file mode 100644
index 0000000000..bd6ad26d1b
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Slice.pbtxt
@@ -0,0 +1,28 @@
+op {
+ graph_op_name: "Slice"
+ in_arg {
+ name: "begin"
+ description: <<END
+begin[i] specifies the offset into the 'i'th dimension of
+'input' to slice from.
+END
+ }
+ in_arg {
+ name: "size"
+ description: <<END
+size[i] specifies the number of elements of the 'i'th dimension
+of 'input' to slice. If size[i] is -1, all remaining elements in dimension
+i are included in the slice (i.e. this is equivalent to setting
+size[i] = input.dim_size(i) - begin[i]).
+END
+ }
+ summary: "Return a slice from \'input\'."
+ description: <<END
+The output tensor is a tensor with dimensions described by 'size'
+whose values are extracted from 'input' starting at the offsets in
+'begin'.
+
+*Requirements*:
+ 0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n)
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Softmax.pbtxt b/tensorflow/core/api_def/base_api/api_def_Softmax.pbtxt
new file mode 100644
index 0000000000..43884824c9
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Softmax.pbtxt
@@ -0,0 +1,21 @@
+op {
+ graph_op_name: "Softmax"
+ in_arg {
+ name: "logits"
+ description: <<END
+2-D with shape `[batch_size, num_classes]`.
+END
+ }
+ out_arg {
+ name: "softmax"
+ description: <<END
+Same shape as `logits`.
+END
+ }
+ summary: "Computes softmax activations."
+ description: <<END
+For each batch `i` and class `j` we have
+
+ softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))
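+
+For example (a sketch, assuming the `tf.nn.softmax` Python endpoint):
+
+```python
+logits = tf.constant([[1.0, 2.0, 3.0]])
+tf.nn.softmax(logits)  # => approximately [[0.090, 0.245, 0.665]]
+```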
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SoftmaxCrossEntropyWithLogits.pbtxt b/tensorflow/core/api_def/base_api/api_def_SoftmaxCrossEntropyWithLogits.pbtxt
new file mode 100644
index 0000000000..973fbb8f6c
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SoftmaxCrossEntropyWithLogits.pbtxt
@@ -0,0 +1,33 @@
+op {
+ graph_op_name: "SoftmaxCrossEntropyWithLogits"
+ in_arg {
+ name: "features"
+ description: <<END
+batch_size x num_classes matrix
+END
+ }
+ in_arg {
+ name: "labels"
+ description: <<END
+batch_size x num_classes matrix
+The caller must ensure that each batch of labels represents a valid
+probability distribution.
+END
+ }
+ out_arg {
+ name: "loss"
+ description: <<END
+Per example loss (batch_size vector).
+END
+ }
+ out_arg {
+ name: "backprop"
+ description: <<END
+backpropagated gradients (batch_size x num_classes matrix).
+END
+ }
+ summary: "Computes softmax cross entropy cost and gradients to backpropagate."
+ description: <<END
+Inputs are the logits, not probabilities.
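+
+For example (a sketch, assuming the `tf.nn.softmax_cross_entropy_with_logits`
+Python endpoint):
+
+```python
+logits = tf.constant([[2.0, 1.0, 0.1]])
+labels = tf.constant([[1.0, 0.0, 0.0]])
+loss = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
+# => approximately [0.417]
+```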
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Softplus.pbtxt b/tensorflow/core/api_def/base_api/api_def_Softplus.pbtxt
new file mode 100644
index 0000000000..83f6aad877
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Softplus.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Softplus"
+ summary: "Computes softplus: `log(exp(features) + 1)`."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SoftplusGrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_SoftplusGrad.pbtxt
new file mode 100644
index 0000000000..96e4d8cb5a
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SoftplusGrad.pbtxt
@@ -0,0 +1,23 @@
+op {
+ graph_op_name: "SoftplusGrad"
+ visibility: HIDDEN
+ in_arg {
+ name: "gradients"
+ description: <<END
+The backpropagated gradients to the corresponding softplus operation.
+END
+ }
+ in_arg {
+ name: "features"
+ description: <<END
+The features passed as input to the corresponding softplus operation.
+END
+ }
+ out_arg {
+ name: "backprops"
+ description: <<END
+The gradients: `gradients / (1 + exp(-features))`.
+END
+ }
+ summary: "Computes softplus gradients for a softplus operation."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Softsign.pbtxt b/tensorflow/core/api_def/base_api/api_def_Softsign.pbtxt
new file mode 100644
index 0000000000..1ae451ec44
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Softsign.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Softsign"
+ summary: "Computes softsign: `features / (abs(features) + 1)`."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SoftsignGrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_SoftsignGrad.pbtxt
new file mode 100644
index 0000000000..23696f12a1
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SoftsignGrad.pbtxt
@@ -0,0 +1,23 @@
+op {
+ graph_op_name: "SoftsignGrad"
+ visibility: HIDDEN
+ in_arg {
+ name: "gradients"
+ description: <<END
+The backpropagated gradients to the corresponding softsign operation.
+END
+ }
+ in_arg {
+ name: "features"
+ description: <<END
+The features passed as input to the corresponding softsign operation.
+END
+ }
+ out_arg {
+ name: "backprops"
+ description: <<END
+The gradients: `gradients / (1 + abs(features)) ** 2`.
+END
+ }
+ summary: "Computes softsign gradients for a softsign operation."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SpaceToBatch.pbtxt b/tensorflow/core/api_def/base_api/api_def_SpaceToBatch.pbtxt
new file mode 100644
index 0000000000..de6182807a
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SpaceToBatch.pbtxt
@@ -0,0 +1,109 @@
+op {
+ graph_op_name: "SpaceToBatch"
+ in_arg {
+ name: "input"
+ description: <<END
+4-D with shape `[batch, height, width, depth]`.
+END
+ }
+ in_arg {
+ name: "paddings"
+ description: <<END
+2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
+ the padding of the input with zeros across the spatial dimensions as follows:
+
+ paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]
+
+ The effective spatial dimensions of the zero-padded input tensor will be:
+
+ height_pad = pad_top + height + pad_bottom
+ width_pad = pad_left + width + pad_right
+
+The attr `block_size` must be greater than one. It indicates the block size.
+
+ * Non-overlapping blocks of size `block_size x block_size` in the height and
+ width dimensions are rearranged into the batch dimension at each location.
+ * The batch of the output tensor is `batch * block_size * block_size`.
+ * Both height_pad and width_pad must be divisible by block_size.
+
+The shape of the output will be:
+
+ [batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
+ depth]
+
+Some examples:
+
+(1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2:
+
+```
+x = [[[[1], [2]], [[3], [4]]]]
+```
+
+The output tensor has shape `[4, 1, 1, 1]` and value:
+
+```
+[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
+```
+
+(2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2:
+
+```
+x = [[[[1, 2, 3], [4, 5, 6]],
+ [[7, 8, 9], [10, 11, 12]]]]
+```
+
+The output tensor has shape `[4, 1, 1, 3]` and value:
+
+```
+[[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
+```
+
+(3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2:
+
+```
+x = [[[[1], [2], [3], [4]],
+ [[5], [6], [7], [8]],
+ [[9], [10], [11], [12]],
+ [[13], [14], [15], [16]]]]
+```
+
+The output tensor has shape `[4, 2, 2, 1]` and value:
+
+```
+x = [[[[1], [3]], [[9], [11]]],
+ [[[2], [4]], [[10], [12]]],
+ [[[5], [7]], [[13], [15]]],
+ [[[6], [8]], [[14], [16]]]]
+```
+
+(4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2:
+
+```
+x = [[[[1], [2], [3], [4]],
+ [[5], [6], [7], [8]]],
+ [[[9], [10], [11], [12]],
+ [[13], [14], [15], [16]]]]
+```
+
+The output tensor has shape `[8, 1, 2, 1]` and value:
+
+```
+x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
+ [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
+```
+
+Among others, this operation is useful for reducing atrous convolution into
+regular convolution.
+END
+ }
+ summary: "SpaceToBatch for 4-D tensors of type T."
+ description: <<END
+This is a legacy version of the more general SpaceToBatchND.
+
+Zero-pads and then rearranges (permutes) blocks of spatial data into batch.
+More specifically, this op outputs a copy of the input tensor where values from
+the `height` and `width` dimensions are moved to the `batch` dimension. After
+the zero-padding, both `height` and `width` of the input must be divisible by the
+block size.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SpaceToBatchND.pbtxt b/tensorflow/core/api_def/base_api/api_def_SpaceToBatchND.pbtxt
new file mode 100644
index 0000000000..2c5e337919
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SpaceToBatchND.pbtxt
@@ -0,0 +1,140 @@
+op {
+ graph_op_name: "SpaceToBatchND"
+ in_arg {
+ name: "input"
+ description: <<END
+N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
+where spatial_shape has `M` dimensions.
+END
+ }
+ in_arg {
+ name: "block_shape"
+ description: <<END
+1-D with shape `[M]`, all values must be >= 1.
+END
+ }
+ in_arg {
+ name: "paddings"
+ description: <<END
+2-D with shape `[M, 2]`, all values must be >= 0.
+ `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension
+ `i + 1`, which corresponds to spatial dimension `i`. It is required that
+ `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`.
+
+This operation is equivalent to the following steps:
+
+1. Zero-pad the start and end of dimensions `[1, ..., M]` of the
+ input according to `paddings` to produce `padded` of shape `padded_shape`.
+
+2. Reshape `padded` to `reshaped_padded` of shape:
+
+ [batch] +
+ [padded_shape[1] / block_shape[0],
+ block_shape[0],
+ ...,
+ padded_shape[M] / block_shape[M-1],
+ block_shape[M-1]] +
+ remaining_shape
+
+3. Permute dimensions of `reshaped_padded` to produce
+ `permuted_reshaped_padded` of shape:
+
+ block_shape +
+ [batch] +
+ [padded_shape[1] / block_shape[0],
+ ...,
+ padded_shape[M] / block_shape[M-1]] +
+ remaining_shape
+
+4. Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch
+ dimension, producing an output tensor of shape:
+
+ [batch * prod(block_shape)] +
+ [padded_shape[1] / block_shape[0],
+ ...,
+ padded_shape[M] / block_shape[M-1]] +
+ remaining_shape
+
+Some examples:
+
+(1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and
+ `paddings = [[0, 0], [0, 0]]`:
+
+```
+x = [[[[1], [2]], [[3], [4]]]]
+```
+
+The output tensor has shape `[4, 1, 1, 1]` and value:
+
+```
+[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
+```
+
+(2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and
+ `paddings = [[0, 0], [0, 0]]`:
+
+```
+x = [[[[1, 2, 3], [4, 5, 6]],
+ [[7, 8, 9], [10, 11, 12]]]]
+```
+
+The output tensor has shape `[4, 1, 1, 3]` and value:
+
+```
+[[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
+```
+
+(3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and
+ `paddings = [[0, 0], [0, 0]]`:
+
+```
+x = [[[[1], [2], [3], [4]],
+ [[5], [6], [7], [8]],
+ [[9], [10], [11], [12]],
+ [[13], [14], [15], [16]]]]
+```
+
+The output tensor has shape `[4, 2, 2, 1]` and value:
+
+```
+x = [[[[1], [3]], [[9], [11]]],
+ [[[2], [4]], [[10], [12]]],
+ [[[5], [7]], [[13], [15]]],
+ [[[6], [8]], [[14], [16]]]]
+```
+
+(4) For the following input of shape `[2, 2, 4, 1]`, `block_shape = [2, 2]`, and
+    `paddings = [[0, 0], [2, 0]]`:
+
+```
+x = [[[[1], [2], [3], [4]],
+ [[5], [6], [7], [8]]],
+ [[[9], [10], [11], [12]],
+ [[13], [14], [15], [16]]]]
+```
+
+The output tensor has shape `[8, 1, 3, 1]` and value:
+
+```
+x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
+ [[[0], [2], [4]]], [[[0], [10], [12]]],
+ [[[0], [5], [7]]], [[[0], [13], [15]]],
+ [[[0], [6], [8]]], [[[0], [14], [16]]]]
+```
+
+Among others, this operation is useful for reducing atrous convolution into
+regular convolution.
+END
+ }
+ summary: "SpaceToBatch for N-D tensors of type T."
+ description: <<END
+This operation divides "spatial" dimensions `[1, ..., M]` of the input into a
+grid of blocks of shape `block_shape`, and interleaves these blocks with the
+"batch" dimension (0) such that in the output, the spatial dimensions
+`[1, ..., M]` correspond to the position within the grid, and the batch
+dimension combines both the position within a spatial block and the original
+batch position. Prior to division into blocks, the spatial dimensions of the
+input are optionally zero padded according to `paddings`. See below for a
+precise description.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SpaceToDepth.pbtxt b/tensorflow/core/api_def/base_api/api_def_SpaceToDepth.pbtxt
new file mode 100644
index 0000000000..8fd3966f70
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SpaceToDepth.pbtxt
@@ -0,0 +1,95 @@
+op {
+ graph_op_name: "SpaceToDepth"
+ attr {
+ name: "block_size"
+ description: <<END
+The size of the spatial block.
+END
+ }
+ summary: "SpaceToDepth for tensors of type T."
+ description: <<END
+Rearranges blocks of spatial data into depth. More specifically,
+this op outputs a copy of the input tensor where values from the `height`
+and `width` dimensions are moved to the `depth` dimension.
+The attr `block_size` indicates the input block size.
+
+ * Non-overlapping blocks of size `block_size x block_size` are rearranged
+ into depth at each location.
+ * The depth of the output tensor is `block_size * block_size * input_depth`.
+ * The Y, X coordinates within each block of the input become the high order
+ component of the output channel index.
+ * The input tensor's height and width must be divisible by block_size.
+
+The `data_format` attr specifies the layout of the input and output tensors
+with the following options:
+ "NHWC": `[ batch, height, width, channels ]`
+ "NCHW": `[ batch, channels, height, width ]`
+ "NCHW_VECT_C":
+ `qint8 [ batch, channels / 4, height, width, channels % 4 ]`
+
+It is useful to consider the operation as transforming a 6-D Tensor.
+e.g. for data_format = NHWC,
+ Each element in the input tensor can be specified via 6 coordinates,
+ ordered by decreasing memory layout significance as:
+ n,oY,bY,oX,bX,iC (where n=batch index, oX, oY means X or Y coordinates
+ within the output image, bX, bY means coordinates
+ within the input block, iC means input channels).
+ The output would be a transpose to the following layout:
+ n,oY,oX,bY,bX,iC
+
+This operation is useful for resizing the activations between convolutions
+(but keeping all data), e.g. instead of pooling. It is also useful for training
+purely convolutional models.
+
+For example, given an input of shape `[1, 2, 2, 1]`, data_format = "NHWC" and
+block_size = 2:
+
+```
+x = [[[[1], [2]],
+ [[3], [4]]]]
+```
+
+This operation will output a tensor of shape `[1, 1, 1, 4]`:
+
+```
+[[[[1, 2, 3, 4]]]]
+```
+
+Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`;
+the corresponding output will have a single element (i.e. width and height are
+both 1) and will have a depth of 4 channels (1 * block_size * block_size).
+The output element shape is `[1, 1, 4]`.
+
+For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g.
+
+```
+x = [[[[1, 2, 3], [4, 5, 6]],
+ [[7, 8, 9], [10, 11, 12]]]]
+```
+
+This operation, for block_size of 2, will return the following tensor of shape
+`[1, 1, 1, 12]`
+
+```
+[[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
+```
+
+Similarly, for the following input of shape `[1, 4, 4, 1]` and a block size of 2:
+
+```
+x = [[[[1], [2], [5], [6]],
+ [[3], [4], [7], [8]],
+ [[9], [10], [13], [14]],
+ [[11], [12], [15], [16]]]]
+```
+
+the operator will return the following tensor of shape `[1, 2, 2, 4]`:
+
+```
+x = [[[[1, 2, 3, 4],
+ [5, 6, 7, 8]],
+ [[9, 10, 11, 12],
+ [13, 14, 15, 16]]]]
+```
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseAccumulatorApplyGradient.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseAccumulatorApplyGradient.pbtxt
new file mode 100644
index 0000000000..11c4980587
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseAccumulatorApplyGradient.pbtxt
@@ -0,0 +1,55 @@
+op {
+ graph_op_name: "SparseAccumulatorApplyGradient"
+ in_arg {
+ name: "handle"
+ description: <<END
+The handle to an accumulator.
+END
+ }
+ in_arg {
+ name: "local_step"
+ description: <<END
+The local_step value at which the sparse gradient was computed.
+END
+ }
+ in_arg {
+ name: "gradient_indices"
+ description: <<END
+Indices of the sparse gradient to be accumulated. Must be a
+vector.
+END
+ }
+ in_arg {
+ name: "gradient_values"
+ description: <<END
+Values are the non-zero slices of the gradient, and must have
+the same first dimension as indices, i.e., the nnz represented by indices and
+values must be consistent.
+END
+ }
+ in_arg {
+ name: "gradient_shape"
+ description: <<END
+Shape of the sparse gradient to be accumulated.
+END
+ }
+ attr {
+ name: "dtype"
+ description: <<END
+The data type of accumulated gradients. Needs to correspond to the type
+of the accumulator.
+END
+ }
+ attr {
+ name: "has_known_shape"
+ description: <<END
+Boolean indicating whether gradient_shape is unknown, in which
+case the input is ignored during validation.
+END
+ }
+ summary: "Applies a sparse gradient to a given accumulator."
+ description: <<END
+Does not add if local_step is smaller than the accumulator's
+global_step.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseAccumulatorTakeGradient.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseAccumulatorTakeGradient.pbtxt
new file mode 100644
index 0000000000..725bbaf501
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseAccumulatorTakeGradient.pbtxt
@@ -0,0 +1,49 @@
+op {
+ graph_op_name: "SparseAccumulatorTakeGradient"
+ in_arg {
+ name: "handle"
+ description: <<END
+The handle to a SparseConditionalAccumulator.
+END
+ }
+ in_arg {
+ name: "num_required"
+ description: <<END
+Number of gradients required before we return an aggregate.
+END
+ }
+ out_arg {
+ name: "indices"
+ description: <<END
+Indices of the average of the accumulated sparse gradients.
+END
+ }
+ out_arg {
+ name: "values"
+ description: <<END
+Values of the average of the accumulated sparse gradients.
+END
+ }
+ out_arg {
+ name: "shape"
+ description: <<END
+Shape of the average of the accumulated sparse gradients.
+END
+ }
+ attr {
+ name: "dtype"
+ description: <<END
+The data type of accumulated gradients. Needs to correspond to the type
+of the accumulator.
+END
+ }
+ summary: "Extracts the average sparse gradient in a SparseConditionalAccumulator."
+ description: <<END
+The op blocks until sufficient (i.e., more than num_required)
+gradients have been accumulated. If the accumulator has already
+aggregated more than num_required gradients, it returns the
+average of the accumulated gradients. It also automatically increments
+the recorded global_step in the accumulator by 1 and resets the
+aggregate to 0.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseAdd.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseAdd.pbtxt
new file mode 100644
index 0000000000..d2409aa3b2
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseAdd.pbtxt
@@ -0,0 +1,62 @@
+op {
+ graph_op_name: "SparseAdd"
+ in_arg {
+ name: "a_indices"
+ description: <<END
+2-D. The `indices` of the first `SparseTensor`, size `[nnz, ndims]` Matrix.
+END
+ }
+ in_arg {
+ name: "a_values"
+ description: <<END
+1-D. The `values` of the first `SparseTensor`, size `[nnz]` Vector.
+END
+ }
+ in_arg {
+ name: "a_shape"
+ description: <<END
+1-D. The `shape` of the first `SparseTensor`, size `[ndims]` Vector.
+END
+ }
+ in_arg {
+ name: "b_indices"
+ description: <<END
+2-D. The `indices` of the second `SparseTensor`, size `[nnz, ndims]` Matrix.
+END
+ }
+ in_arg {
+ name: "b_values"
+ description: <<END
+1-D. The `values` of the second `SparseTensor`, size `[nnz]` Vector.
+END
+ }
+ in_arg {
+ name: "b_shape"
+ description: <<END
+1-D. The `shape` of the second `SparseTensor`, size `[ndims]` Vector.
+END
+ }
+ in_arg {
+ name: "thresh"
+ description: <<END
+0-D. The magnitude threshold that determines if an output value/index
+pair takes space.
+END
+ }
+ summary: "Adds two `SparseTensor` objects to produce another `SparseTensor`."
+ description: <<END
+The input `SparseTensor` objects' indices are assumed ordered in standard
+lexicographic order. If this is not the case, before this step run
+`SparseReorder` to restore index ordering.
+
+By default, if two values sum to zero at some index, the output `SparseTensor`
+would still include that particular location in its index, storing a zero in the
+corresponding value slot. To override this, callers can specify `thresh`,
+indicating that if the sum has a magnitude strictly smaller than `thresh`, its
+corresponding value and index would then not be included. In particular,
+`thresh == 0` (default) means everything is kept and actual thresholding happens
+only for a positive value of `thresh`.
+
+In the following shapes, `nnz` is the count after taking `thresh` into account.
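+
+For example (a sketch, assuming the `tf.sparse_add` Python endpoint):
+
+```python
+a = tf.SparseTensor(indices=[[0, 0]], values=[1.0], dense_shape=[2, 2])
+b = tf.SparseTensor(indices=[[0, 0], [1, 1]], values=[-1.0, 2.0],
+                    dense_shape=[2, 2])
+# With thresh=0.1, the (0, 0) entry, which sums to 0, is dropped from the sum.
+result = tf.sparse_add(a, b, thresh=0.1)
+```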
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseAddGrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseAddGrad.pbtxt
new file mode 100644
index 0000000000..e5e0a7d9cb
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseAddGrad.pbtxt
@@ -0,0 +1,50 @@
+op {
+ graph_op_name: "SparseAddGrad"
+ in_arg {
+ name: "backprop_val_grad"
+ description: <<END
+1-D with shape `[nnz(sum)]`. The gradient with respect to
+the non-empty values of the sum.
+END
+ }
+ in_arg {
+ name: "a_indices"
+ description: <<END
+2-D. The `indices` of the `SparseTensor` A, size `[nnz(A), ndims]`.
+END
+ }
+ in_arg {
+ name: "b_indices"
+ description: <<END
+2-D. The `indices` of the `SparseTensor` B, size `[nnz(B), ndims]`.
+END
+ }
+ in_arg {
+ name: "sum_indices"
+ description: <<END
+2-D. The `indices` of the sum `SparseTensor`, size
+`[nnz(sum), ndims]`.
+END
+ }
+ out_arg {
+ name: "a_val_grad"
+ description: <<END
+1-D with shape `[nnz(A)]`. The gradient with respect to the
+non-empty values of A.
+END
+ }
+ out_arg {
+ name: "b_val_grad"
+ description: <<END
+1-D with shape `[nnz(B)]`. The gradient with respect to the
+non-empty values of B.
+END
+ }
+ summary: "The gradient operator for the SparseAdd op."
+ description: <<END
+The SparseAdd op calculates A + B, where A, B, and the sum are all represented
+as `SparseTensor` objects. This op takes in the upstream gradient w.r.t.
+non-empty values of the sum, and outputs the gradients w.r.t. the non-empty
+values of A and B.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseApplyAdadelta.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseApplyAdadelta.pbtxt
new file mode 100644
index 0000000000..15c1797d2d
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseApplyAdadelta.pbtxt
@@ -0,0 +1,59 @@
+op {
+ graph_op_name: "SparseApplyAdadelta"
+ in_arg {
+ name: "accum"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "accum_update"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "lr"
+ description: <<END
+Learning rate. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "rho"
+ description: <<END
+Decay factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "epsilon"
+ description: <<END
+Constant factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+The gradient.
+END
+ }
+ in_arg {
+ name: "indices"
+ description: <<END
+A vector of indices into the first dimension of var and accum.
+END
+ }
+ out_arg {
+ name: "out"
+ description: <<END
+Same as "var".
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If True, updating of the var and accum tensors will be protected by
+a lock; otherwise the behavior is undefined, but may exhibit less contention.
+END
+ }
+ summary: "var: Should be from a Variable()."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseApplyAdagrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseApplyAdagrad.pbtxt
new file mode 100644
index 0000000000..1698e2def0
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseApplyAdagrad.pbtxt
@@ -0,0 +1,53 @@
+op {
+ graph_op_name: "SparseApplyAdagrad"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "accum"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "lr"
+ description: <<END
+Learning rate. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+The gradient.
+END
+ }
+ in_arg {
+ name: "indices"
+ description: <<END
+A vector of indices into the first dimension of var and accum.
+END
+ }
+ out_arg {
+ name: "out"
+ description: <<END
+Same as "var".
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If `True`, updating of the var and accum tensors will be protected
+by a lock; otherwise the behavior is undefined, but may exhibit less
+contention.
+END
+ }
+ summary: "Update relevant entries in \'*var\' and \'*accum\' according to the adagrad scheme."
+ description: <<END
+That is, for rows for which we have grad, we update var and accum as follows:
+accum += grad * grad
+var -= lr * grad * (1 / sqrt(accum))
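+
+A one-row NumPy sketch of these equations (hypothetical values, not op output):
+
+```python
+import numpy as np
+var, accum, lr, grad = 1.0, 0.1, 0.01, 0.5
+accum += grad * grad                        # accum = 0.35
+var -= lr * grad * (1.0 / np.sqrt(accum))   # var is approximately 0.99155
+```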
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseApplyAdagradDA.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseApplyAdagradDA.pbtxt
new file mode 100644
index 0000000000..a6878eb70b
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseApplyAdagradDA.pbtxt
@@ -0,0 +1,71 @@
+op {
+ graph_op_name: "SparseApplyAdagradDA"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "gradient_accumulator"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "gradient_squared_accumulator"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+The gradient.
+END
+ }
+ in_arg {
+ name: "indices"
+ description: <<END
+A vector of indices into the first dimension of var and accum.
+END
+ }
+ in_arg {
+ name: "lr"
+ description: <<END
+Learning rate. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "l1"
+ description: <<END
+L1 regularization. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "l2"
+ description: <<END
+L2 regularization. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "global_step"
+ description: <<END
+Training step number. Must be a scalar.
+END
+ }
+ out_arg {
+ name: "out"
+ description: <<END
+Same as "var".
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If `True`, updating of the var and accum tensors will be protected by
+a lock; otherwise the behavior is undefined, but may exhibit less contention.
+END
+ }
+ summary: "Update entries in \'*var\' and \'*accum\' according to the proximal adagrad scheme."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseApplyCenteredRMSProp.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseApplyCenteredRMSProp.pbtxt
new file mode 100644
index 0000000000..2c6a36bf45
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseApplyCenteredRMSProp.pbtxt
@@ -0,0 +1,90 @@
+op {
+ graph_op_name: "SparseApplyCenteredRMSProp"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "mg"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "ms"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "mom"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "lr"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "rho"
+ description: <<END
+Decay rate. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "epsilon"
+ description: <<END
+Ridge term. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+The gradient.
+END
+ }
+ in_arg {
+ name: "indices"
+ description: <<END
+A vector of indices into the first dimension of var, ms and mom.
+END
+ }
+ out_arg {
+ name: "out"
+ description: <<END
+Same as "var".
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If `True`, updating of the var, mg, ms, and mom tensors is
+protected by a lock; otherwise the behavior is undefined, but may exhibit less
+contention.
+END
+ }
+ summary: "Update \'*var\' according to the centered RMSProp algorithm."
+ description: <<END
+The centered RMSProp algorithm uses an estimate of the centered second moment
+(i.e., the variance) for normalization, as opposed to regular RMSProp, which
+uses the (uncentered) second moment. This often helps with training, but is
+slightly more expensive in terms of computation and memory.
+
+Note that in the dense implementation of this algorithm, mg, ms, and mom will
+update even if the grad is zero, but in this sparse implementation, mg, ms,
+and mom will not update in iterations during which the grad is zero.
+
+mean_square = decay * mean_square + (1-decay) * gradient ** 2
+mean_grad = decay * mean_grad + (1-decay) * gradient
+Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
+
+mg <- rho * mg_{t-1} + (1-rho) * grad
+ms <- rho * ms_{t-1} + (1-rho) * grad * grad
+mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon)
+var <- var - mom
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseApplyFtrl.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseApplyFtrl.pbtxt
new file mode 100644
index 0000000000..524b5c5a47
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseApplyFtrl.pbtxt
@@ -0,0 +1,80 @@
+op {
+ graph_op_name: "SparseApplyFtrl"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "accum"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "linear"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+The gradient.
+END
+ }
+ in_arg {
+ name: "indices"
+ description: <<END
+A vector of indices into the first dimension of var and accum.
+END
+ }
+ in_arg {
+ name: "lr"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "l1"
+ description: <<END
+L1 regularization. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "l2"
+ description: <<END
+L2 regularization. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "lr_power"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ out_arg {
+ name: "out"
+ description: <<END
+Same as "var".
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If `True`, updating of the var and accum tensors will be protected
+by a lock; otherwise the behavior is undefined, but may exhibit less
+contention.
+END
+ }
+ summary: "Update relevant entries in \'*var\' according to the Ftrl-proximal scheme."
+ description: <<END
+That is, for rows for which grad is provided, we update var, accum and
+linear as follows:
+accum_new = accum + grad * grad
+linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
+quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
+var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
+accum = accum_new
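+
+A minimal NumPy sketch of this per-row update (reference semantics only;
+the helper name is hypothetical):
+
+```python
+import numpy as np
+
+def sparse_apply_ftrl(var, accum, linear, grad, indices,
+                      lr, l1, l2, lr_power):
+  for i, row in enumerate(indices):
+    accum_new = accum[row] + grad[i] * grad[i]
+    linear[row] += grad[i] + (
+        accum_new ** (-lr_power) - accum[row] ** (-lr_power)) / lr * var[row]
+    quadratic = 1.0 / (accum_new ** lr_power * lr) + 2 * l2
+    # Soft-threshold: the update is zero unless |linear| exceeds l1.
+    var[row] = np.where(
+        np.abs(linear[row]) > l1,
+        (np.sign(linear[row]) * l1 - linear[row]) / quadratic,
+        0.0)
+    accum[row] = accum_new
+  return var
+```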
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseApplyFtrlV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseApplyFtrlV2.pbtxt
new file mode 100644
index 0000000000..9247fb61b8
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseApplyFtrlV2.pbtxt
@@ -0,0 +1,82 @@
+op {
+ graph_op_name: "SparseApplyFtrlV2"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "accum"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "linear"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+The gradient.
+END
+ }
+ in_arg {
+ name: "indices"
+ description: <<END
+A vector of indices into the first dimension of var and accum.
+END
+ }
+ in_arg {
+ name: "lr"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "l1"
+ description: <<END
+L1 regularization. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "l2"
+ description: <<END
+L2 shrinkage regularization. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "lr_power"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ out_arg {
+ name: "out"
+ description: <<END
+Same as "var".
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If `True`, updating of the var and accum tensors will be protected
+by a lock; otherwise the behavior is undefined, but may exhibit less
+contention.
+END
+ }
+ summary: "Update relevant entries in \'*var\' according to the Ftrl-proximal scheme."
+ description: <<END
+That is, for rows for which grad is provided, we update var, accum and
+linear as follows:
+grad_with_shrinkage = grad + 2 * l2_shrinkage * var
+accum_new = accum + grad_with_shrinkage * grad_with_shrinkage
+linear += grad_with_shrinkage +
+ (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
+quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
+var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
+accum = accum_new
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseApplyMomentum.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseApplyMomentum.pbtxt
new file mode 100644
index 0000000000..8d9ac9ea3f
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseApplyMomentum.pbtxt
@@ -0,0 +1,70 @@
+op {
+ graph_op_name: "SparseApplyMomentum"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "accum"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "lr"
+ description: <<END
+Learning rate. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+The gradient.
+END
+ }
+ in_arg {
+ name: "indices"
+ description: <<END
+A vector of indices into the first dimension of var and accum.
+END
+ }
+ in_arg {
+ name: "momentum"
+ description: <<END
+Momentum. Must be a scalar.
+END
+ }
+ out_arg {
+ name: "out"
+ description: <<END
+Same as "var".
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If `True`, updating of the var and accum tensors will be protected
+by a lock; otherwise the behavior is undefined, but may exhibit less
+contention.
+END
+ }
+ attr {
+ name: "use_nesterov"
+ description: <<END
+If `True`, the tensor passed to compute grad will be
+var - lr * momentum * accum, so in the end, the var you get is the
+Nesterov-adjusted value var - lr * momentum * accum.
+END
+ }
+ summary: "Update relevant entries in \'*var\' and \'*accum\' according to the momentum scheme."
+ description: <<END
+Set use_nesterov = True if you want to use Nesterov momentum.
+
+That is, for rows for which grad is provided, we update var and accum as follows:
+
+accum = accum * momentum + grad
+var -= lr * accum
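+
+A minimal NumPy sketch of this per-row update (illustrative only; the
+`use_nesterov` branch is an assumption, taken to mirror the dense kernel):
+
+```python
+import numpy as np
+
+def sparse_apply_momentum(var, accum, lr, grad, indices, momentum,
+                          use_nesterov=False):
+  for i, row in enumerate(indices):
+    accum[row] = accum[row] * momentum + grad[i]
+    if use_nesterov:
+      var[row] -= grad[i] * lr + accum[row] * momentum * lr
+    else:
+      var[row] -= lr * accum[row]
+  return var
+```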
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseApplyProximalAdagrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseApplyProximalAdagrad.pbtxt
new file mode 100644
index 0000000000..80541b91c7
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseApplyProximalAdagrad.pbtxt
@@ -0,0 +1,66 @@
+op {
+ graph_op_name: "SparseApplyProximalAdagrad"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "accum"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "lr"
+ description: <<END
+Learning rate. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "l1"
+ description: <<END
+L1 regularization. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "l2"
+ description: <<END
+L2 regularization. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+The gradient.
+END
+ }
+ in_arg {
+ name: "indices"
+ description: <<END
+A vector of indices into the first dimension of var and accum.
+END
+ }
+ out_arg {
+ name: "out"
+ description: <<END
+Same as "var".
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If `True`, updating of the var and accum tensors will be protected by
+a lock; otherwise the behavior is undefined, but may exhibit less contention.
+END
+ }
+ summary: "Sparse update entries in \'*var\' and \'*accum\' according to FOBOS algorithm."
+ description: <<END
+That is, for rows for which grad is provided, we update var and accum as follows:
+accum += grad * grad
+prox_v = var
+prox_v -= lr * grad * (1 / sqrt(accum))
+var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
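+
+A minimal NumPy sketch of this per-row update (reference semantics only,
+following the equations above; the helper name is hypothetical):
+
+```python
+import numpy as np
+
+def sparse_apply_proximal_adagrad(var, accum, lr, l1, l2, grad, indices):
+  for i, row in enumerate(indices):
+    accum[row] += grad[i] * grad[i]
+    prox_v = var[row] - lr * grad[i] / np.sqrt(accum[row])
+    # Soft-threshold prox_v by lr * l1, then shrink by the l2 term.
+    var[row] = (np.sign(prox_v) / (1 + lr * l2)
+                * np.maximum(np.abs(prox_v) - lr * l1, 0))
+  return var
+```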
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseApplyProximalGradientDescent.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseApplyProximalGradientDescent.pbtxt
new file mode 100644
index 0000000000..5200e5516d
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseApplyProximalGradientDescent.pbtxt
@@ -0,0 +1,58 @@
+op {
+ graph_op_name: "SparseApplyProximalGradientDescent"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "alpha"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "l1"
+ description: <<END
+L1 regularization. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "l2"
+ description: <<END
+L2 regularization. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+The gradient.
+END
+ }
+ in_arg {
+ name: "indices"
+ description: <<END
+A vector of indices into the first dimension of var and accum.
+END
+ }
+ out_arg {
+ name: "out"
+ description: <<END
+Same as "var".
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If `True`, the subtraction will be protected by a lock;
+otherwise the behavior is undefined, but may exhibit less contention.
+END
+ }
+ summary: "Sparse update \'*var\' as FOBOS algorithm with fixed learning rate."
+ description: <<END
+That is, for rows for which grad is provided, we update var as follows:
+prox_v = var - alpha * grad
+var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseApplyRMSProp.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseApplyRMSProp.pbtxt
new file mode 100644
index 0000000000..a4dbd608b8
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseApplyRMSProp.pbtxt
@@ -0,0 +1,78 @@
+op {
+ graph_op_name: "SparseApplyRMSProp"
+ in_arg {
+ name: "var"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "ms"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "mom"
+ description: <<END
+Should be from a Variable().
+END
+ }
+ in_arg {
+ name: "lr"
+ description: <<END
+Scaling factor. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "rho"
+ description: <<END
+Decay rate. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "epsilon"
+ description: <<END
+Ridge term. Must be a scalar.
+END
+ }
+ in_arg {
+ name: "grad"
+ description: <<END
+The gradient.
+END
+ }
+ in_arg {
+ name: "indices"
+ description: <<END
+A vector of indices into the first dimension of var, ms and mom.
+END
+ }
+ out_arg {
+ name: "out"
+ description: <<END
+Same as "var".
+END
+ }
+ attr {
+ name: "use_locking"
+ description: <<END
+If `True`, updating of the var, ms, and mom tensors is protected
+by a lock; otherwise the behavior is undefined, but may exhibit less
+contention.
+END
+ }
+ summary: "Update \'*var\' according to the RMSProp algorithm."
+ description: <<END
+Note that in the dense implementation of this algorithm, ms and mom will
+update even if the grad is zero, but in this sparse implementation, ms
+and mom will not update in iterations during which the grad is zero.
+
+mean_square = decay * mean_square + (1-decay) * gradient ** 2
+Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
+
+ms <- rho * ms_{t-1} + (1-rho) * grad * grad
+mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
+var <- var - mom
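+
+A minimal NumPy sketch of this per-row update (reference semantics only;
+`momentum` is an additional scalar input assumed from the formula above):
+
+```python
+import numpy as np
+
+def sparse_apply_rms_prop(var, ms, mom, lr, rho, momentum, epsilon,
+                          grad, indices):
+  for i, row in enumerate(indices):
+    ms[row] = rho * ms[row] + (1 - rho) * grad[i] * grad[i]
+    mom[row] = momentum * mom[row] + lr * grad[i] / np.sqrt(ms[row] + epsilon)
+    var[row] -= mom[row]
+  return var
+```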
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseConcat.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseConcat.pbtxt
new file mode 100644
index 0000000000..a72ae90475
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseConcat.pbtxt
@@ -0,0 +1,90 @@
+op {
+ graph_op_name: "SparseConcat"
+ in_arg {
+ name: "indices"
+ description: <<END
+2-D. Indices of each input `SparseTensor`.
+END
+ }
+ in_arg {
+ name: "values"
+ description: <<END
+1-D. Non-empty values of each `SparseTensor`.
+END
+ }
+ in_arg {
+ name: "shapes"
+ description: <<END
+1-D. Shapes of each `SparseTensor`.
+END
+ }
+ out_arg {
+ name: "output_indices"
+ description: <<END
+2-D. Indices of the concatenated `SparseTensor`.
+END
+ }
+ out_arg {
+ name: "output_values"
+ description: <<END
+1-D. Non-empty values of the concatenated `SparseTensor`.
+END
+ }
+ out_arg {
+ name: "output_shape"
+ description: <<END
+1-D. Shape of the concatenated `SparseTensor`.
+END
+ }
+ attr {
+ name: "concat_dim"
+ description: <<END
+Dimension to concatenate along. Must be in range [-rank, rank),
+where rank is the number of dimensions in each input `SparseTensor`.
+END
+ }
+ summary: "Concatenates a list of `SparseTensor` along the specified dimension."
+ description: <<END
+Concatenation is with respect to the dense versions of these sparse tensors.
+It is assumed that each input is a `SparseTensor` whose elements are ordered
+along increasing dimension number.
+
+All inputs' shapes must match, except for the concat dimension. The
+`indices`, `values`, and `shapes` lists must have the same length.
+
+The output shape is identical to the inputs', except along the concat
+dimension, where it is the sum of the inputs' sizes along that dimension.
+
+The output elements will be re-sorted to preserve the sort order along
+increasing dimension number.
+
+This op runs in `O(M log M)` time, where `M` is the total number of non-empty
+values across all inputs. This is due to the need for an internal sort in
+order to concatenate efficiently across an arbitrary dimension.
+
+For example, if `concat_dim = 1` and the inputs are
+
+ sp_inputs[0]: shape = [2, 3]
+ [0, 2]: "a"
+ [1, 0]: "b"
+ [1, 1]: "c"
+
+ sp_inputs[1]: shape = [2, 4]
+ [0, 1]: "d"
+ [0, 2]: "e"
+
+then the output will be
+
+ shape = [2, 7]
+ [0, 2]: "a"
+ [0, 4]: "d"
+ [0, 5]: "e"
+ [1, 0]: "b"
+ [1, 1]: "c"
+
+Graphically this is equivalent to doing
+
+ [ a] concat [ d e ] = [ a d e ]
+ [b c ] [ ] [b c ]
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseConditionalAccumulator.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseConditionalAccumulator.pbtxt
new file mode 100644
index 0000000000..c367416f2a
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseConditionalAccumulator.pbtxt
@@ -0,0 +1,44 @@
+op {
+ graph_op_name: "SparseConditionalAccumulator"
+ out_arg {
+ name: "handle"
+ description: <<END
+The handle to the accumulator.
+END
+ }
+ attr {
+ name: "dtype"
+ description: <<END
+The type of the value being accumulated.
+END
+ }
+ attr {
+ name: "shape"
+ description: <<END
+The shape of the values.
+END
+ }
+ attr {
+ name: "container"
+ description: <<END
+If non-empty, this accumulator is placed in the given container.
+Otherwise, a default container is used.
+END
+ }
+ attr {
+ name: "shared_name"
+ description: <<END
+If non-empty, this accumulator will be shared under the given name
+across multiple sessions.
+END
+ }
+ summary: "A conditional accumulator for aggregating sparse gradients."
+ description: <<END
+The accumulator accepts gradients marked with local_step greater than or
+equal to the most recent global_step known to the accumulator. The
+average can be extracted from the accumulator, provided sufficient
+gradients have been accumulated. Extracting the average automatically
+resets the aggregate to 0, and increments the global_step recorded by
+the accumulator.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseCross.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseCross.pbtxt
new file mode 100644
index 0000000000..2aea6cfe4f
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseCross.pbtxt
@@ -0,0 +1,106 @@
+op {
+ graph_op_name: "SparseCross"
+ in_arg {
+ name: "indices"
+ description: <<END
+2-D. Indices of each input `SparseTensor`.
+END
+ }
+ in_arg {
+ name: "values"
+ description: <<END
+1-D. Values of each `SparseTensor`.
+END
+ }
+ in_arg {
+ name: "shapes"
+ description: <<END
+1-D. Shapes of each `SparseTensor`.
+END
+ }
+ in_arg {
+ name: "dense_inputs"
+ description: <<END
+2-D. Columns represented by dense `Tensor`.
+END
+ }
+ out_arg {
+ name: "output_indices"
+ description: <<END
+2-D. Indices of the concatenated `SparseTensor`.
+END
+ }
+ out_arg {
+ name: "output_values"
+ description: <<END
+1-D. Non-empty values of the concatenated or hashed
+`SparseTensor`.
+END
+ }
+ out_arg {
+ name: "output_shape"
+ description: <<END
+1-D. Shape of the concatenated `SparseTensor`.
+END
+ }
+ attr {
+ name: "hashed_output"
+ description: <<END
+If true, returns the hash of the cross instead of the string.
+This allows us to avoid string manipulations.
+END
+ }
+ attr {
+ name: "num_buckets"
+ description: <<END
+Used only if hashed_output is true.
+output = hashed_value % num_buckets if num_buckets > 0 else hashed_value.
+END
+ }
+ attr {
+ name: "hash_key"
+ description: <<END
+Specify the hash_key that will be used by the `FingerprintCat64`
+function to combine the fingerprints of the crossed values.
+END
+ }
+ summary: "Generates sparse cross from a list of sparse and dense tensors."
+ description: <<END
+The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each
+representing features of one feature column. It outputs a 2D `SparseTensor` with
+the batchwise crosses of these features.
+
+For example, if the inputs are
+
+ inputs[0]: SparseTensor with shape = [2, 2]
+ [0, 0]: "a"
+ [1, 0]: "b"
+ [1, 1]: "c"
+
+ inputs[1]: SparseTensor with shape = [2, 1]
+ [0, 0]: "d"
+ [1, 0]: "e"
+
+ inputs[2]: Tensor [["f"], ["g"]]
+
+then the output will be
+
+ shape = [2, 2]
+ [0, 0]: "a_X_d_X_f"
+ [1, 0]: "b_X_e_X_g"
+ [1, 1]: "c_X_e_X_g"
+
+if hashed_output=true then the output will be
+
+ shape = [2, 2]
+ [0, 0]: FingerprintCat64(
+ Fingerprint64("f"), FingerprintCat64(
+ Fingerprint64("d"), Fingerprint64("a")))
+ [1, 0]: FingerprintCat64(
+ Fingerprint64("g"), FingerprintCat64(
+ Fingerprint64("e"), Fingerprint64("b")))
+ [1, 1]: FingerprintCat64(
+ Fingerprint64("g"), FingerprintCat64(
+ Fingerprint64("e"), Fingerprint64("c")))
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseDenseCwiseAdd.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseDenseCwiseAdd.pbtxt
new file mode 100644
index 0000000000..81d346adfb
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseDenseCwiseAdd.pbtxt
@@ -0,0 +1,45 @@
+op {
+ graph_op_name: "SparseDenseCwiseAdd"
+ in_arg {
+ name: "sp_indices"
+ description: <<END
+2-D. `N x R` matrix with the indices of non-empty values in a
+SparseTensor, possibly not in canonical ordering.
+END
+ }
+ in_arg {
+ name: "sp_values"
+ description: <<END
+1-D. `N` non-empty values corresponding to `sp_indices`.
+END
+ }
+ in_arg {
+ name: "sp_shape"
+ description: <<END
+1-D. Shape of the input SparseTensor.
+END
+ }
+ in_arg {
+ name: "dense"
+ description: <<END
+`R`-D. The dense Tensor operand.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+1-D. The `N` values that are operated on.
+END
+ }
+ summary: "Adds up a SparseTensor and a dense Tensor, using these special rules:"
+ description: <<END
+(1) Broadcasts the dense side to have the same shape as the sparse side, if
+ eligible;
+(2) Then, only the dense values pointed to by the indices of the SparseTensor
+ participate in the cwise addition.
+
+By these rules, the result is a logical SparseTensor with exactly the same
+indices and shape, but possibly with different non-zero values. The output of
+this Op is the resultant non-zero values.
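+
+A minimal NumPy sketch of these two rules (illustrative only; inputs are
+assumed to be ndarrays):
+
+```python
+import numpy as np
+
+def sparse_dense_cwise_add(sp_indices, sp_values, sp_shape, dense):
+  # Rule (1): broadcast the dense operand to the sparse shape.
+  dense = np.broadcast_to(dense, tuple(sp_shape))
+  # Rule (2): only dense values at the sparse indices participate.
+  return sp_values + dense[tuple(sp_indices.T)]
+```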
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseDenseCwiseDiv.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseDenseCwiseDiv.pbtxt
new file mode 100644
index 0000000000..40ea9c846a
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseDenseCwiseDiv.pbtxt
@@ -0,0 +1,39 @@
+op {
+ graph_op_name: "SparseDenseCwiseDiv"
+ in_arg {
+ name: "sp_indices"
+ description: <<END
+2-D. `N x R` matrix with the indices of non-empty values in a
+SparseTensor, possibly not in canonical ordering.
+END
+ }
+ in_arg {
+ name: "sp_values"
+ description: <<END
+1-D. `N` non-empty values corresponding to `sp_indices`.
+END
+ }
+ in_arg {
+ name: "sp_shape"
+ description: <<END
+1-D. Shape of the input SparseTensor.
+END
+ }
+ in_arg {
+ name: "dense"
+ description: <<END
+`R`-D. The dense Tensor operand.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+1-D. The `N` values that are operated on.
+END
+ }
+ summary: "Component-wise divides a SparseTensor by a dense Tensor."
+ description: <<END
+*Limitation*: this Op only broadcasts the dense side to the sparse side, but not
+the other direction.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseDenseCwiseMul.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseDenseCwiseMul.pbtxt
new file mode 100644
index 0000000000..262ab2dc76
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseDenseCwiseMul.pbtxt
@@ -0,0 +1,43 @@
+op {
+ graph_op_name: "SparseDenseCwiseMul"
+ in_arg {
+ name: "sp_indices"
+ description: <<END
+2-D. `N x R` matrix with the indices of non-empty values in a
+SparseTensor, possibly not in canonical ordering.
+END
+ }
+ in_arg {
+ name: "sp_values"
+ description: <<END
+1-D. `N` non-empty values corresponding to `sp_indices`.
+END
+ }
+ in_arg {
+ name: "sp_shape"
+ description: <<END
+1-D. Shape of the input SparseTensor.
+END
+ }
+ in_arg {
+ name: "dense"
+ description: <<END
+`R`-D. The dense Tensor operand.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+1-D. The `N` values that are operated on.
+END
+ }
+ summary: "Component-wise multiplies a SparseTensor by a dense Tensor."
+ description: <<END
+The output locations corresponding to the implicitly zero elements in the sparse
+tensor will be zero (i.e., will not take up storage space), regardless of the
+contents of the dense tensor (even if it contains +/-INF, and despite the
+fact that INF * 0 == NaN).
+
+*Limitation*: this Op only broadcasts the dense side to the sparse side, but not
+the other direction.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseFillEmptyRows.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseFillEmptyRows.pbtxt
new file mode 100644
index 0000000000..f9f25554b2
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseFillEmptyRows.pbtxt
@@ -0,0 +1,87 @@
+op {
+ graph_op_name: "SparseFillEmptyRows"
+ in_arg {
+ name: "indices"
+ description: <<END
+2-D. The indices of the sparse tensor.
+END
+ }
+ in_arg {
+ name: "values"
+ description: <<END
+1-D. The values of the sparse tensor.
+END
+ }
+ in_arg {
+ name: "dense_shape"
+ description: <<END
+1-D. The shape of the sparse tensor.
+END
+ }
+ in_arg {
+ name: "default_value"
+ description: <<END
+0-D. Default value to insert into location `[row, 0, ..., 0]`
+for rows missing from the input sparse tensor.
+END
+ }
+ out_arg {
+ name: "output_indices"
+ description: <<END
+2-D. The indices of the filled sparse tensor.
+END
+ }
+ out_arg {
+ name: "output_values"
+ description: <<END
+1-D. The values of the filled sparse tensor.
+END
+ }
+ out_arg {
+ name: "empty_row_indicator"
+ description: <<END
+1-D. Whether the dense row was missing in the
+input sparse tensor.
+END
+ }
+ out_arg {
+ name: "reverse_index_map"
+ description: <<END
+1-D. A map from the input indices to the output indices.
+END
+ }
+ summary: "Fills empty rows in the input 2-D `SparseTensor` with a default value."
+ description: <<END
+The input `SparseTensor` is represented via the tuple of inputs
+(`indices`, `values`, `dense_shape`). The output `SparseTensor` has the
+same `dense_shape` but with indices `output_indices` and values
+`output_values`.
+
+This op inserts a single entry for every row that doesn't have any values.
+The index is created as `[row, 0, ..., 0]` and the inserted value
+is `default_value`.
+
+For example, suppose `sp_input` has shape `[5, 6]` and non-empty values:
+
+ [0, 1]: a
+ [0, 3]: b
+ [2, 0]: c
+ [3, 1]: d
+
+Rows 1 and 4 are empty, so the output will be of shape `[5, 6]` with values:
+
+ [0, 1]: a
+ [0, 3]: b
+ [1, 0]: default_value
+ [2, 0]: c
+ [3, 1]: d
+ [4, 0]: default_value
+
+The output `SparseTensor` will be in row-major order and will have the
+same shape as the input.
+
+This op also returns an indicator vector shaped `[dense_shape[0]]` such that
+
+ empty_row_indicator[i] = True iff row i was an empty row.
+
+It also returns a reverse index map vector, shaped `[indices.shape[0]]`, that
+is used during backpropagation:
+
+ reverse_index_map[j] = out_j s.t. indices[j, :] == output_indices[out_j, :]
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseFillEmptyRowsGrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseFillEmptyRowsGrad.pbtxt
new file mode 100644
index 0000000000..eef43e61f2
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseFillEmptyRowsGrad.pbtxt
@@ -0,0 +1,38 @@
+op {
+ graph_op_name: "SparseFillEmptyRowsGrad"
+ in_arg {
+ name: "reverse_index_map"
+ description: <<END
+1-D. The reverse index map from SparseFillEmptyRows.
+END
+ }
+ in_arg {
+ name: "grad_values"
+ description: <<END
+1-D. The gradients from backprop.
+END
+ }
+ out_arg {
+ name: "d_values"
+ description: <<END
+1-D. The backprop into values.
+END
+ }
+ out_arg {
+ name: "d_default_value"
+ description: <<END
+0-D. The backprop into default_value.
+END
+ }
+ summary: "The gradient of SparseFillEmptyRows."
+ description: <<END
+Takes vectors reverse_index_map, shaped `[N]`, and grad_values,
+shaped `[N_full]`, where `N_full >= N`, and copies data into either
+`d_values` or `d_default_value`. Here `d_values` is shaped `[N]` and
+`d_default_value` is a scalar.
+
+ d_values[j] = grad_values[reverse_index_map[j]]
+ d_default_value = sum_{k : 0 .. N_full - 1} (
+ grad_values[k] * 1{k not in reverse_index_map})
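+
+A minimal NumPy sketch of these two formulas (illustrative only; the
+helper name is hypothetical):
+
+```python
+import numpy as np
+
+def sparse_fill_empty_rows_grad(reverse_index_map, grad_values):
+  d_values = grad_values[reverse_index_map]
+  # Entries of grad_values not reachable through reverse_index_map came
+  # from positions filled with default_value, so they sum into its grad.
+  mask = np.ones(grad_values.shape[0], dtype=bool)
+  mask[reverse_index_map] = False
+  d_default_value = grad_values[mask].sum()
+  return d_values, d_default_value
+```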
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseMatMul.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseMatMul.pbtxt
new file mode 100644
index 0000000000..58f2ede629
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseMatMul.pbtxt
@@ -0,0 +1,13 @@
+op {
+ graph_op_name: "SparseMatMul"
+ summary: "Multiply matrix \"a\" by matrix \"b\"."
+ description: <<END
+The inputs must be two-dimensional matrices and the inner dimension of "a" must
+match the outer dimension of "b". This op is optimized for the case where at
+least one of "a" or "b" is sparse. The breakeven for using this versus a dense
+matrix multiply on one platform was 30% zero values in the sparse matrix.
+
+The gradient computation of this operation will only take advantage of sparsity
+in the input gradient when that gradient comes from a Relu.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseReduceMax.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseReduceMax.pbtxt
new file mode 100644
index 0000000000..2c2e7e0df1
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseReduceMax.pbtxt
@@ -0,0 +1,55 @@
+op {
+ graph_op_name: "SparseReduceMax"
+ in_arg {
+ name: "input_indices"
+ description: <<END
+2-D. `N x R` matrix with the indices of non-empty values in a
+SparseTensor, possibly not in canonical ordering.
+END
+ }
+ in_arg {
+ name: "input_values"
+ description: <<END
+1-D. `N` non-empty values corresponding to `input_indices`.
+END
+ }
+ in_arg {
+ name: "input_shape"
+ description: <<END
+1-D. Shape of the input SparseTensor.
+END
+ }
+ in_arg {
+ name: "reduction_axes"
+ description: <<END
+1-D. Length-`K` vector containing the reduction axes.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+`R-K`-D. The reduced Tensor.
+END
+ }
+ attr {
+ name: "keep_dims"
+ description: <<END
+If true, retain reduced dimensions with length 1.
+END
+ }
+ summary: "Computes the max of elements across dimensions of a SparseTensor."
+ description: <<END
+This Op takes a SparseTensor and is the sparse counterpart to
+`tf.reduce_max()`. In particular, this Op also returns a dense `Tensor`
+instead of a sparse one.
+
+Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
+`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
+`reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
+with length 1.
+
+If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
+with a single element is returned. Additionally, the axes can be negative,
+in which case they are interpreted according to Python's indexing rules.
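+
+For example, assuming the Python wrapper `tf.sparse_reduce_max` (the
+wrapper name is an assumption of this sketch, not part of this op def):
+
+```python
+import tensorflow as tf
+
+sp = tf.SparseTensor(indices=[[0, 0], [0, 1], [1, 2]],
+                     values=[1.0, 3.0, 2.0],
+                     dense_shape=[2, 3])
+# Max over each row; only explicitly stored values participate.
+result = tf.sparse_reduce_max(sp, axis=1)  # => [3.0, 2.0]
+```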
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseReduceMaxSparse.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseReduceMaxSparse.pbtxt
new file mode 100644
index 0000000000..c75a2bb233
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseReduceMaxSparse.pbtxt
@@ -0,0 +1,49 @@
+op {
+ graph_op_name: "SparseReduceMaxSparse"
+ in_arg {
+ name: "input_indices"
+ description: <<END
+2-D. `N x R` matrix with the indices of non-empty values in a
+SparseTensor, possibly not in canonical ordering.
+END
+ }
+ in_arg {
+ name: "input_values"
+ description: <<END
+1-D. `N` non-empty values corresponding to `input_indices`.
+END
+ }
+ in_arg {
+ name: "input_shape"
+ description: <<END
+1-D. Shape of the input SparseTensor.
+END
+ }
+ in_arg {
+ name: "reduction_axes"
+ description: <<END
+1-D. Length-`K` vector containing the reduction axes.
+END
+ }
+ attr {
+ name: "keep_dims"
+ description: <<END
+If true, retain reduced dimensions with length 1.
+END
+ }
+ summary: "Computes the max of elements across dimensions of a SparseTensor."
+ description: <<END
+This Op takes a SparseTensor and is the sparse counterpart to
+`tf.reduce_max()`. In contrast to SparseReduceMax, this Op returns a
+SparseTensor.
+
+Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
+`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
+`reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
+with length 1.
+
+If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
+with a single element is returned. Additionally, the axes can be negative,
+in which case they are interpreted according to Python's indexing rules.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseReduceSum.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseReduceSum.pbtxt
new file mode 100644
index 0000000000..cf6f868d14
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseReduceSum.pbtxt
@@ -0,0 +1,55 @@
+op {
+ graph_op_name: "SparseReduceSum"
+ in_arg {
+ name: "input_indices"
+ description: <<END
+2-D. `N x R` matrix with the indices of non-empty values in a
+SparseTensor, possibly not in canonical ordering.
+END
+ }
+ in_arg {
+ name: "input_values"
+ description: <<END
+1-D. `N` non-empty values corresponding to `input_indices`.
+END
+ }
+ in_arg {
+ name: "input_shape"
+ description: <<END
+1-D. Shape of the input SparseTensor.
+END
+ }
+ in_arg {
+ name: "reduction_axes"
+ description: <<END
+1-D. Length-`K` vector containing the reduction axes.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+`R-K`-D. The reduced Tensor.
+END
+ }
+ attr {
+ name: "keep_dims"
+ description: <<END
+If true, retain reduced dimensions with length 1.
+END
+ }
+ summary: "Computes the sum of elements across dimensions of a SparseTensor."
+ description: <<END
+This Op takes a SparseTensor and is the sparse counterpart to
+`tf.reduce_sum()`. In particular, this Op also returns a dense `Tensor`
+instead of a sparse one.
+
+Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
+`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
+`reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
+with length 1.
+
+If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
+with a single element is returned. Additionally, the axes can be negative,
+in which case they are interpreted according to Python's indexing rules.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseReduceSumSparse.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseReduceSumSparse.pbtxt
new file mode 100644
index 0000000000..cad169e5f9
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseReduceSumSparse.pbtxt
@@ -0,0 +1,49 @@
+op {
+ graph_op_name: "SparseReduceSumSparse"
+ in_arg {
+ name: "input_indices"
+ description: <<END
+2-D. `N x R` matrix with the indices of non-empty values in a
+SparseTensor, possibly not in canonical ordering.
+END
+ }
+ in_arg {
+ name: "input_values"
+ description: <<END
+1-D. `N` non-empty values corresponding to `input_indices`.
+END
+ }
+ in_arg {
+ name: "input_shape"
+ description: <<END
+1-D. Shape of the input SparseTensor.
+END
+ }
+ in_arg {
+ name: "reduction_axes"
+ description: <<END
+1-D. Length-`K` vector containing the reduction axes.
+END
+ }
+ attr {
+ name: "keep_dims"
+ description: <<END
+If true, retain reduced dimensions with length 1.
+END
+ }
+ summary: "Computes the sum of elements across dimensions of a SparseTensor."
+ description: <<END
+This Op takes a SparseTensor and is the sparse counterpart to
+`tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a
+SparseTensor.
+
+Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
+`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
+`reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
+with length 1.
+
+If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
+with a single element is returned. Additionally, the axes can be negative,
+in which case they are interpreted according to Python's indexing rules.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseReorder.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseReorder.pbtxt
new file mode 100644
index 0000000000..07ffc6dcf3
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseReorder.pbtxt
@@ -0,0 +1,46 @@
+op {
+ graph_op_name: "SparseReorder"
+ in_arg {
+ name: "input_indices"
+ description: <<END
+2-D. `N x R` matrix with the indices of non-empty values in a
+SparseTensor, possibly not in canonical ordering.
+END
+ }
+ in_arg {
+ name: "input_values"
+ description: <<END
+1-D. `N` non-empty values corresponding to `input_indices`.
+END
+ }
+ in_arg {
+ name: "input_shape"
+ description: <<END
+1-D. Shape of the input SparseTensor.
+END
+ }
+ out_arg {
+ name: "output_indices"
+ description: <<END
+2-D. `N x R` matrix with the same indices as input_indices, but
+in canonical row-major ordering.
+END
+ }
+ out_arg {
+ name: "output_values"
+ description: <<END
+1-D. `N` non-empty values corresponding to `output_indices`.
+END
+ }
+ summary: "Reorders a SparseTensor into the canonical, row-major ordering."
+ description: <<END
+Note that by convention, all sparse ops preserve the canonical ordering along
+increasing dimension number. The only time ordering can be violated is during
+manual manipulation of the indices and values vectors to add entries.
+
+Reordering does not affect the shape of the SparseTensor.
+
+If the tensor has rank `R` and `N` non-empty values, `input_indices` has
+shape `[N, R]`, `input_values` has length `N`, and `input_shape` has length `R`.
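+
+A minimal NumPy sketch of the reordering itself (illustrative only):
+
+```python
+import numpy as np
+
+def sparse_reorder(input_indices, input_values):
+  # np.lexsort treats its last key as primary, so reverse the dimensions
+  # to sort row-major (dimension 0 first).
+  order = np.lexsort(input_indices.T[::-1])
+  return input_indices[order], input_values[order]
+```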
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseReshape.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseReshape.pbtxt
new file mode 100644
index 0000000000..84fef9fbc4
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseReshape.pbtxt
@@ -0,0 +1,55 @@
+op {
+ graph_op_name: "SparseReshape"
+ in_arg {
+ name: "input_indices"
+ description: <<END
+2-D. `N x R_in` matrix with the indices of non-empty values in a
+SparseTensor.
+END
+ }
+ in_arg {
+ name: "input_shape"
+ description: <<END
+1-D. `R_in` vector with the input SparseTensor's dense shape.
+END
+ }
+ in_arg {
+ name: "new_shape"
+ description: <<END
+1-D. `R_out` vector with the requested new dense shape.
+END
+ }
+ out_arg {
+ name: "output_indices"
+ description: <<END
+2-D. `N x R_out` matrix with the updated indices of non-empty
+values in the output SparseTensor.
+END
+ }
+ out_arg {
+ name: "output_shape"
+ description: <<END
+1-D. `R_out` vector with the full dense shape of the output
+SparseTensor. This is the same as `new_shape` but with any -1 dimensions
+filled in.
+END
+ }
+ summary: "Reshapes a SparseTensor to represent values in a new dense shape."
+ description: <<END
+This operation has the same semantics as reshape on the represented dense
+tensor. The `input_indices` are recomputed based on the requested `new_shape`.
+
+If one component of `new_shape` is the special value -1, the size of that
+dimension is computed so that the total dense size remains constant. At
+most one component of `new_shape` can be -1. The number of dense elements
+implied by `new_shape` must be the same as the number of dense elements
+originally implied by `input_shape`.
+
+Reshaping does not affect the order of values in the SparseTensor.
+
+If the input tensor has rank `R_in` and `N` non-empty values, and `new_shape`
+has length `R_out`, then `input_indices` has shape `[N, R_in]`,
+`input_shape` has length `R_in`, `output_indices` has shape `[N, R_out]`, and
+`output_shape` has length `R_out`.
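+
+A minimal NumPy sketch of the index recomputation (illustrative only):
+
+```python
+import numpy as np
+
+def sparse_reshape(input_indices, input_shape, new_shape):
+  new_shape = np.array(new_shape)
+  if (new_shape == -1).any():
+    # Infer the single -1 dimension so the total dense size is unchanged.
+    known = np.prod(new_shape[new_shape != -1])
+    new_shape[new_shape == -1] = np.prod(input_shape) // known
+  # Linearize each index in the old shape, then re-expand in the new one.
+  flat = np.ravel_multi_index(tuple(input_indices.T), tuple(input_shape))
+  output_indices = np.stack(np.unravel_index(flat, tuple(new_shape)), axis=1)
+  return output_indices, new_shape
+```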
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseSegmentMean.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseSegmentMean.pbtxt
new file mode 100644
index 0000000000..18e6660595
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseSegmentMean.pbtxt
@@ -0,0 +1,30 @@
+op {
+ graph_op_name: "SparseSegmentMean"
+ in_arg {
+ name: "indices"
+ description: <<END
+A 1-D tensor. Has same rank as `segment_ids`.
+END
+ }
+ in_arg {
+ name: "segment_ids"
+ description: <<END
+A 1-D tensor. Values should be sorted and can be repeated.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Has same shape as data, except for dimension 0 which
+has size `k`, the number of segments.
+END
+ }
+ summary: "Computes the mean along sparse segments of a tensor."
+ description: <<END
+Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
+segments.
+
+Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first
+dimension, selecting a subset of dimension 0, specified by `indices`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseSegmentMeanGrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseSegmentMeanGrad.pbtxt
new file mode 100644
index 0000000000..b58d6671b5
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseSegmentMeanGrad.pbtxt
@@ -0,0 +1,32 @@
+op {
+ graph_op_name: "SparseSegmentMeanGrad"
+ in_arg {
+ name: "grad"
+ description: <<END
+gradient propagated to the SparseSegmentMean op.
+END
+ }
+ in_arg {
+ name: "indices"
+ description: <<END
+indices passed to the corresponding SparseSegmentMean op.
+END
+ }
+ in_arg {
+ name: "segment_ids"
+ description: <<END
+segment_ids passed to the corresponding SparseSegmentMean op.
+END
+ }
+ in_arg {
+ name: "output_dim0"
+ description: <<END
+dimension 0 of "data" passed to SparseSegmentMean op.
+END
+ }
+ summary: "Computes gradients for SparseSegmentMean."
+ description: <<END
+Returns tensor "output" with same shape as grad, except for dimension 0 whose
+value is output_dim0.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseSegmentSqrtN.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseSegmentSqrtN.pbtxt
new file mode 100644
index 0000000000..3fdeb66aed
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseSegmentSqrtN.pbtxt
@@ -0,0 +1,29 @@
+op {
+ graph_op_name: "SparseSegmentSqrtN"
+ in_arg {
+ name: "indices"
+ description: <<END
+A 1-D tensor. Has same rank as `segment_ids`.
+END
+ }
+ in_arg {
+ name: "segment_ids"
+ description: <<END
+A 1-D tensor. Values should be sorted and can be repeated.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Has same shape as data, except for dimension 0 which
+has size `k`, the number of segments.
+END
+ }
+ summary: "Computes the sum along sparse segments of a tensor divided by the sqrt of N."
+ description: <<END
+N is the size of the segment being reduced.
+
+Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
+segments.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseSegmentSqrtNGrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseSegmentSqrtNGrad.pbtxt
new file mode 100644
index 0000000000..7cb2e29ef4
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseSegmentSqrtNGrad.pbtxt
@@ -0,0 +1,32 @@
+op {
+ graph_op_name: "SparseSegmentSqrtNGrad"
+ in_arg {
+ name: "grad"
+ description: <<END
+gradient propagated to the SparseSegmentSqrtN op.
+END
+ }
+ in_arg {
+ name: "indices"
+ description: <<END
+indices passed to the corresponding SparseSegmentSqrtN op.
+END
+ }
+ in_arg {
+ name: "segment_ids"
+ description: <<END
+segment_ids passed to the corresponding SparseSegmentSqrtN op.
+END
+ }
+ in_arg {
+ name: "output_dim0"
+ description: <<END
+dimension 0 of "data" passed to SparseSegmentSqrtN op.
+END
+ }
+ summary: "Computes gradients for SparseSegmentSqrtN."
+ description: <<END
+Returns tensor "output" with same shape as grad, except for dimension 0 whose
+value is output_dim0.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseSegmentSum.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseSegmentSum.pbtxt
new file mode 100644
index 0000000000..cdf44a89a3
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseSegmentSum.pbtxt
@@ -0,0 +1,53 @@
+op {
+ graph_op_name: "SparseSegmentSum"
+ in_arg {
+ name: "indices"
+ description: <<END
+A 1-D tensor. Has same rank as `segment_ids`.
+END
+ }
+ in_arg {
+ name: "segment_ids"
+ description: <<END
+A 1-D tensor. Values should be sorted and can be repeated.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Has same shape as data, except for dimension 0 which
+has size `k`, the number of segments.
+END
+ }
+ summary: "Computes the sum along sparse segments of a tensor."
+ description: <<END
+Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
+segments.
+
+Like `SegmentSum`, but `segment_ids` can have rank less than `data`'s first
+dimension, selecting a subset of dimension 0, specified by `indices`.
+
+For example:
+
+```python
+c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
+
+# Select two rows, one segment.
+tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
+# => [[0 0 0 0]]
+
+# Select two rows, two segments.
+tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
+# => [[ 1 2 3 4]
+# [-1 -2 -3 -4]]
+
+# Select all rows, two segments.
+tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
+# => [[0 0 0 0]
+# [5 6 7 8]]
+
+# Which is equivalent to:
+tf.segment_sum(c, tf.constant([0, 0, 1]))
+```
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseSlice.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseSlice.pbtxt
new file mode 100644
index 0000000000..637ba6ece4
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseSlice.pbtxt
@@ -0,0 +1,67 @@
+op {
+ graph_op_name: "SparseSlice"
+ in_arg {
+ name: "indices"
+ description: <<END
+2-D tensor representing the indices of the sparse tensor.
+END
+ }
+ in_arg {
+ name: "values"
+ description: <<END
+1-D tensor representing the values of the sparse tensor.
+END
+ }
+ in_arg {
+ name: "shape"
+ description: <<END
+1-D tensor representing the shape of the sparse tensor.
+END
+ }
+ in_arg {
+ name: "start"
+ description: <<END
+1-D tensor representing the start of the slice.
+END
+ }
+ in_arg {
+ name: "size"
+ description: <<END
+1-D tensor representing the size of the slice.
+END
+ }
+ out_arg {
+ name: "output_indices"
+ description: <<END
+A list of 1-D tensors representing the indices of the output sparse
+tensors.
+END
+ }
+ out_arg {
+ name: "output_values"
+ description: <<END
+A list of 1-D tensors representing the values of the output sparse
+tensors.
+END
+ }
+ out_arg {
+ name: "output_shape"
+ description: <<END
+A list of 1-D tensors representing the shape of the output sparse
+tensors.
+END
+ }
+ summary: "Slice a `SparseTensor` based on the `start` and `size`."
+ description: <<END
+For example, if the input is
+
+ input_tensor = shape = [2, 7]
+ [ a d e ]
+ [b c ]
+
+Graphically the output tensors are:
+
+ sparse_slice([0, 0], [2, 4]) = shape = [2, 4]
+ [ a ]
+ [b c ]
+
+ sparse_slice([0, 4], [2, 3]) = shape = [2, 3]
+ [ d e ]
+ [ ]
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseSoftmax.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseSoftmax.pbtxt
new file mode 100644
index 0000000000..c64c3c68a1
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseSoftmax.pbtxt
@@ -0,0 +1,46 @@
+op {
+ graph_op_name: "SparseSoftmax"
+ in_arg {
+ name: "sp_indices"
+ description: <<END
+2-D. `NNZ x R` matrix with the indices of non-empty values in a
+SparseTensor, in canonical ordering.
+END
+ }
+ in_arg {
+ name: "sp_values"
+ description: <<END
+1-D. `NNZ` non-empty values corresponding to `sp_indices`.
+END
+ }
+ in_arg {
+ name: "sp_shape"
+ description: <<END
+1-D. Shape of the input SparseTensor.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+1-D. The `NNZ` values for the result `SparseTensor`.
+END
+ }
+ summary: "Applies softmax to a batched N-D `SparseTensor`."
+ description: <<END
+The inputs represent an N-D SparseTensor with logical shape `[..., B, C]`
+(where `N >= 2`), and with indices sorted in the canonical lexicographic order.
+
+This op is equivalent to applying the normal `tf.nn.softmax()` to each innermost
+logical submatrix with shape `[B, C]`, but with the catch that *the implicitly
+zero elements do not participate*. Specifically, the algorithm is equivalent
+to the following:
+
+ (1) Applies `tf.nn.softmax()` to a densified view of each innermost submatrix
+ with shape `[B, C]`, along the size-C dimension;
+ (2) Masks out the original implicitly-zero locations;
+ (3) Renormalizes the remaining elements.
+
+Hence, the `SparseTensor` result has exactly the same non-zero indices and
+shape.
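+
+A minimal NumPy sketch for the rank-2 case (illustrative only):
+
+```python
+import numpy as np
+
+def sparse_softmax_2d(sp_indices, sp_values):
+  # Softmax over each row, restricted to explicitly stored elements;
+  # implicitly zero elements do not participate.
+  out = np.empty_like(sp_values)
+  rows = sp_indices[:, 0]
+  for r in np.unique(rows):
+    mask = rows == r
+    v = sp_values[mask]
+    e = np.exp(v - v.max())  # subtract the row max for stability
+    out[mask] = e / e.sum()
+  return out
+```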
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseSoftmaxCrossEntropyWithLogits.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseSoftmaxCrossEntropyWithLogits.pbtxt
new file mode 100644
index 0000000000..a867bbe04d
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseSoftmaxCrossEntropyWithLogits.pbtxt
@@ -0,0 +1,37 @@
+op {
+ graph_op_name: "SparseSoftmaxCrossEntropyWithLogits"
+ in_arg {
+ name: "features"
+ description: <<END
+batch_size x num_classes matrix
+END
+ }
+ in_arg {
+ name: "labels"
+ description: <<END
+batch_size vector with values in [0, num_classes).
+This is the label for the given minibatch entry.
+END
+ }
+ out_arg {
+ name: "loss"
+ description: <<END
+Per example loss (batch_size vector).
+END
+ }
+ out_arg {
+ name: "backprop"
+ description: <<END
+backpropagated gradients (batch_size x num_classes matrix).
+END
+ }
+ summary: "Computes softmax cross entropy cost and gradients to backpropagate."
+ description: <<END
+Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept
+a matrix of label probabilities, but rather a single label per row
+of features. This label is considered to have probability 1.0 for the
+given row.
+
+Inputs are the logits, not probabilities.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseSparseMaximum.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseSparseMaximum.pbtxt
new file mode 100644
index 0000000000..34ccddd5d4
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseSparseMaximum.pbtxt
@@ -0,0 +1,56 @@
+op {
+ graph_op_name: "SparseSparseMaximum"
+ in_arg {
+ name: "a_indices"
+ description: <<END
+2-D. `N x R` matrix with the indices of non-empty values in a
+SparseTensor, in the canonical lexicographic ordering.
+END
+ }
+ in_arg {
+ name: "a_values"
+ description: <<END
+1-D. `N` non-empty values corresponding to `a_indices`.
+END
+ }
+ in_arg {
+ name: "a_shape"
+ description: <<END
+1-D. Shape of the input SparseTensor.
+END
+ }
+ in_arg {
+ name: "b_indices"
+ description: <<END
+counterpart to `a_indices` for the other operand.
+END
+ }
+ in_arg {
+ name: "b_values"
+ description: <<END
+counterpart to `a_values` for the other operand; must be of the same dtype.
+END
+ }
+ in_arg {
+ name: "b_shape"
+ description: <<END
+counterpart to `a_shape` for the other operand; the two shapes must be equal.
+END
+ }
+ out_arg {
+ name: "output_indices"
+ description: <<END
+2-D. The indices of the output SparseTensor.
+END
+ }
+ out_arg {
+ name: "output_values"
+ description: <<END
+1-D. The values of the output SparseTensor.
+END
+ }
+ summary: "Returns the element-wise max of two SparseTensors."
+ description: <<END
+Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseSparseMinimum.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseSparseMinimum.pbtxt
new file mode 100644
index 0000000000..1b25684bb0
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseSparseMinimum.pbtxt
@@ -0,0 +1,56 @@
+op {
+ graph_op_name: "SparseSparseMinimum"
+ in_arg {
+ name: "a_indices"
+ description: <<END
+2-D. `N x R` matrix with the indices of non-empty values in a
+SparseTensor, in the canonical lexicographic ordering.
+END
+ }
+ in_arg {
+ name: "a_values"
+ description: <<END
+1-D. `N` non-empty values corresponding to `a_indices`.
+END
+ }
+ in_arg {
+ name: "a_shape"
+ description: <<END
+1-D. Shape of the input SparseTensor.
+END
+ }
+ in_arg {
+ name: "b_indices"
+ description: <<END
+counterpart to `a_indices` for the other operand.
+END
+ }
+ in_arg {
+ name: "b_values"
+ description: <<END
+counterpart to `a_values` for the other operand; must be of the same dtype.
+END
+ }
+ in_arg {
+ name: "b_shape"
+ description: <<END
+counterpart to `a_shape` for the other operand; the two shapes must be equal.
+END
+ }
+ out_arg {
+ name: "output_indices"
+ description: <<END
+2-D. The indices of the output SparseTensor.
+END
+ }
+ out_arg {
+ name: "output_values"
+ description: <<END
+1-D. The values of the output SparseTensor.
+END
+ }
+ summary: "Returns the element-wise min of two SparseTensors."
+ description: <<END
+Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseSplit.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseSplit.pbtxt
new file mode 100644
index 0000000000..cc90ad333b
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseSplit.pbtxt
@@ -0,0 +1,70 @@
+op {
+ graph_op_name: "SparseSplit"
+ in_arg {
+ name: "split_dim"
+ description: <<END
+0-D. The dimension along which to split. Must be in the range
+`[0, rank(shape))`.
+END
+ }
+ in_arg {
+ name: "indices"
+ description: <<END
+2-D tensor representing the indices of the sparse tensor.
+END
+ }
+ in_arg {
+ name: "values"
+ description: <<END
+1-D tensor representing the values of the sparse tensor.
+END
+ }
+ in_arg {
+ name: "shape"
+ description: <<END
+1-D tensor representing the shape of the sparse tensor.
+END
+ }
+ out_arg {
+ name: "output_indices"
+ description: <<END
+A list of 1-D tensors representing the indices of the output sparse
+tensors.
+END
+ }
+ out_arg {
+ name: "output_values"
+ description: <<END
+A list of 1-D tensors representing the values of the output sparse
+tensors.
+END
+ }
+ out_arg {
+ name: "output_shape"
+ description: <<END
+A list of 1-D tensors representing the shape of the output sparse
+tensors.
+END
+ }
+ attr {
+ name: "num_split"
+ description: <<END
+The number of ways to split.
+END
+ }
+ summary: "Split a `SparseTensor` into `num_split` tensors along one dimension."
+ description: <<END
+If `shape[split_dim]` is not an integer multiple of `num_split`, slices
+`[0 : shape[split_dim] % num_split]` get one extra dimension.
+For example, if `split_dim = 1` and `num_split = 2` and the input is
+
+ input_tensor = shape = [2, 7]
+ [ a d e ]
+ [b c ]
+
+Graphically the output tensors are:
+
+ output_tensor[0] = shape = [2, 4]
+ [ a ]
+ [b c ]
+
+ output_tensor[1] = shape = [2, 3]
+ [ d e ]
+ [ ]
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseTensorDenseAdd.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseTensorDenseAdd.pbtxt
new file mode 100644
index 0000000000..7225447188
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseTensorDenseAdd.pbtxt
@@ -0,0 +1,31 @@
+op {
+ graph_op_name: "SparseTensorDenseAdd"
+ in_arg {
+ name: "a_indices"
+ description: <<END
+2-D. The `indices` of the `SparseTensor`, with shape `[nnz, ndims]`.
+END
+ }
+ in_arg {
+ name: "a_values"
+ description: <<END
+1-D. The `values` of the `SparseTensor`, with shape `[nnz]`.
+END
+ }
+ in_arg {
+ name: "a_shape"
+ description: <<END
+1-D. The `shape` of the `SparseTensor`, with shape `[ndims]`.
+END
+ }
+ in_arg {
+ name: "b"
+ description: <<END
+`ndims`-D Tensor with shape `a_shape`.
+END
+ }
+ summary: "Adds up a `SparseTensor` and a dense `Tensor`, producing a dense `Tensor`."
+ description: <<END
+This Op does not require `a_indices` be sorted in standard lexicographic order.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseTensorDenseMatMul.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseTensorDenseMatMul.pbtxt
new file mode 100644
index 0000000000..0a5dc08d21
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseTensorDenseMatMul.pbtxt
@@ -0,0 +1,53 @@
+op {
+ graph_op_name: "SparseTensorDenseMatMul"
+ in_arg {
+ name: "a_indices"
+ description: <<END
+2-D. The `indices` of the `SparseTensor`, size `[nnz, 2]` Matrix.
+END
+ }
+ in_arg {
+ name: "a_values"
+ description: <<END
+1-D. The `values` of the `SparseTensor`, size `[nnz]` Vector.
+END
+ }
+ in_arg {
+ name: "a_shape"
+ description: <<END
+1-D. The `shape` of the `SparseTensor`, size `[2]` Vector.
+END
+ }
+ in_arg {
+ name: "b"
+ description: <<END
+2-D. A dense Matrix.
+END
+ }
+ attr {
+ name: "adjoint_a"
+ description: <<END
+Use the adjoint of A in the matrix multiply. If A is complex, this
+is transpose(conj(A)). Otherwise it's transpose(A).
+END
+ }
+ attr {
+ name: "adjoint_b"
+ description: <<END
+Use the adjoint of B in the matrix multiply. If B is complex, this
+is transpose(conj(B)). Otherwise it's transpose(B).
+END
+ }
+ summary: "Multiply SparseTensor (of rank 2) \"A\" by dense matrix \"B\"."
+ description: <<END
+No validity checking is performed on the indices of A. However, the following
+input format is recommended for optimal behavior:
+
+if adjoint_a == false:
+ A should be sorted in lexicographically increasing order. Use SparseReorder
+ if you're not sure.
+if adjoint_a == true:
+ A should be sorted in order of increasing dimension 1 (i.e., "column major"
+ order instead of "row major" order).
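+
+For example, a sketch assuming the `tf.sparse_tensor_dense_matmul` endpoint:
+
+```python
+import tensorflow as tf
+
+a = tf.SparseTensor(indices=[[0, 0], [1, 2]],
+                    values=[1.0, 2.0],
+                    dense_shape=[2, 3])
+b = tf.ones([3, 4])
+
+# Dense [2, 4] product A * B.
+c = tf.sparse_tensor_dense_matmul(a, b)
+```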
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseTensorSliceDataset.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseTensorSliceDataset.pbtxt
new file mode 100644
index 0000000000..ffb8058349
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseTensorSliceDataset.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "SparseTensorSliceDataset"
+ summary: "Creates a dataset that splits a SparseTensor into elements row-wise."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseToDense.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseToDense.pbtxt
new file mode 100644
index 0000000000..5fb0012d04
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseToDense.pbtxt
@@ -0,0 +1,65 @@
+op {
+ graph_op_name: "SparseToDense"
+ in_arg {
+ name: "sparse_indices"
+ description: <<END
+0-D, 1-D, or 2-D. `sparse_indices[i]` contains the complete
+index where `sparse_values[i]` will be placed.
+END
+ }
+ in_arg {
+ name: "output_shape"
+ description: <<END
+1-D. Shape of the dense output tensor.
+END
+ }
+ in_arg {
+ name: "sparse_values"
+ description: <<END
+1-D. Values corresponding to each row of `sparse_indices`,
+or a scalar value to be used for all sparse indices.
+END
+ }
+ in_arg {
+ name: "default_value"
+ description: <<END
+Scalar value to set for indices not specified in
+`sparse_indices`.
+END
+ }
+ out_arg {
+ name: "dense"
+ description: <<END
+Dense output tensor of shape `output_shape`.
+END
+ }
+ attr {
+ name: "validate_indices"
+ description: <<END
+If true, indices are checked to make sure they are sorted in
+lexicographic order and that there are no repeats.
+END
+ }
+ summary: "Converts a sparse representation into a dense tensor."
+ description: <<END
+Builds an array `dense` with shape `output_shape` such that
+
+```
+# If sparse_indices is scalar
+dense[i] = (i == sparse_indices ? sparse_values : default_value)
+
+# If sparse_indices is a vector, then for each i
+dense[sparse_indices[i]] = sparse_values[i]
+
+# If sparse_indices is an n by d matrix, then for each i in [0, n)
+dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
+```
+
+All other values in `dense` are set to `default_value`. If `sparse_values` is a
+scalar, all sparse indices are set to this single value.
+
+Indices should be sorted in lexicographic order, and indices must not
+contain any repeats. If `validate_indices` is true, these properties
+are checked during execution.
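+
+As a usage sketch via the Python `tf.sparse_to_dense` wrapper (assumed to map
+directly onto this op):
+
+```python
+import tensorflow as tf
+
+# Scatter 5 and 7 into positions 1 and 3 of a length-4 vector; remaining
+# entries take the default value -1, giving [-1, 5, -1, 7].
+dense = tf.sparse_to_dense(sparse_indices=[1, 3],
+                           output_shape=[4],
+                           sparse_values=[5, 7],
+                           default_value=-1)
+```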
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SparseToSparseSetOperation.pbtxt b/tensorflow/core/api_def/base_api/api_def_SparseToSparseSetOperation.pbtxt
new file mode 100644
index 0000000000..766f756bb5
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SparseToSparseSetOperation.pbtxt
@@ -0,0 +1,93 @@
+op {
+ graph_op_name: "SparseToSparseSetOperation"
+ in_arg {
+ name: "set1_indices"
+ description: <<END
+2D `Tensor`, indices of a `SparseTensor`. Must be in row-major
+order.
+END
+ }
+ in_arg {
+ name: "set1_values"
+ description: <<END
+1D `Tensor`, values of a `SparseTensor`. Must be in row-major
+order.
+END
+ }
+ in_arg {
+ name: "set1_shape"
+ description: <<END
+1D `Tensor`, shape of a `SparseTensor`. `set1_shape[0...n-1]` must
+be the same as `set2_shape[0...n-1]`, `set1_shape[n]` is the
+max set size across `0...n-1` dimensions.
+END
+ }
+ in_arg {
+ name: "set2_indices"
+ description: <<END
+2D `Tensor`, indices of a `SparseTensor`. Must be in row-major
+order.
+END
+ }
+ in_arg {
+ name: "set2_values"
+ description: <<END
+1D `Tensor`, values of a `SparseTensor`. Must be in row-major
+order.
+END
+ }
+ in_arg {
+ name: "set2_shape"
+ description: <<END
+1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must
+be the same as `set1_shape[0...n-1]`, `set2_shape[n]` is the
+max set size across `0...n-1` dimensions.
+END
+ }
+ out_arg {
+ name: "result_indices"
+ description: <<END
+2D indices of a `SparseTensor`.
+END
+ }
+ out_arg {
+ name: "result_values"
+ description: <<END
+1D values of a `SparseTensor`.
+END
+ }
+ out_arg {
+ name: "result_shape"
+ description: <<END
+1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is
+the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`
+is the max result set size across all `0...n-1` dimensions.
+END
+ }
+ summary: "Applies set operation along last dimension of 2 `SparseTensor` inputs."
+ description: <<END
+See SetOperationOp::SetOperationFromContext for values of `set_operation`.
+
+If `validate_indices` is `True`, `SparseToSparseSetOperation` validates the
+order and range of `set1` and `set2` indices.
+
+Input `set1` is a `SparseTensor` represented by `set1_indices`, `set1_values`,
+and `set1_shape`. For `set1` ranked `n`, 1st `n-1` dimensions must be the same
+as `set2`. Dimension `n` contains values in a set, duplicates are allowed but
+ignored.
+
+Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`,
+and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same
+as `set1`. Dimension `n` contains values in a set, duplicates are allowed but
+ignored.
+
+If `validate_indices` is `True`, this op validates the order and range of `set1`
+and `set2` indices.
+
+Output `result` is a `SparseTensor` represented by `result_indices`,
+`result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this
+has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`
+dimension contains the result of `set_operation` applied to the corresponding
+`[0...n-1]` dimension of `set`.
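+
+A rough sketch of a row-wise intersection (reaching this op through the
+`tf.sets.set_intersection` wrapper is an assumption here):
+
+```python
+import tensorflow as tf
+
+# Two batches of sets; the last dimension holds the set elements.
+a = tf.SparseTensor(indices=[[0, 0], [0, 1], [1, 0]],
+                    values=[1, 2, 3], dense_shape=[2, 2])
+b = tf.SparseTensor(indices=[[0, 0], [1, 0]],
+                    values=[2, 4], dense_shape=[2, 1])
+
+# Row 0: {1, 2} & {2} = {2}; row 1: {3} & {4} = {}.
+result = tf.sets.set_intersection(a, b)
+```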
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Split.pbtxt b/tensorflow/core/api_def/base_api/api_def_Split.pbtxt
new file mode 100644
index 0000000000..802f440896
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Split.pbtxt
@@ -0,0 +1,33 @@
+op {
+ graph_op_name: "Split"
+ in_arg {
+ name: "split_dim"
+ rename_to: "axis"
+ description: <<END
+0-D. The dimension along which to split. Must be in the range
+`[-rank(value), rank(value))`.
+END
+ }
+ in_arg {
+ name: "value"
+ description: <<END
+The tensor to split.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Identically shaped tensors, whose shape matches that of `value`
+except along `split_dim`, where their sizes are
+`value.shape[split_dim] / num_split`.
+END
+ }
+ attr {
+ name: "num_split"
+ description: <<END
+The number of ways to split. Must evenly divide
+`value.shape[split_dim]`.
+END
+ }
+ summary: "Splits a tensor into `num_split` tensors along one dimension."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SplitV.pbtxt b/tensorflow/core/api_def/base_api/api_def_SplitV.pbtxt
new file mode 100644
index 0000000000..6c1660ffb6
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SplitV.pbtxt
@@ -0,0 +1,34 @@
+op {
+ graph_op_name: "SplitV"
+ in_arg {
+ name: "value"
+ description: <<END
+The tensor to split.
+END
+ }
+ in_arg {
+ name: "size_splits"
+ description: <<END
+List containing the sizes of each output tensor along the split
+dimension. Must sum to the dimension of value along split_dim.
+Can contain one -1 indicating that dimension is to be inferred.
+END
+ }
+ in_arg {
+ name: "split_dim"
+ rename_to: "axis"
+ description: <<END
+0-D. The dimension along which to split. Must be in the range
+`[-rank(value), rank(value))`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Tensors whose shape matches that of `value`
+except along `split_dim`, where their sizes are
+`size_splits[i]`.
+END
+ }
+ summary: "Splits a tensor into `num_split` tensors along one dimension."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SqlDataset.pbtxt b/tensorflow/core/api_def/base_api/api_def_SqlDataset.pbtxt
new file mode 100644
index 0000000000..7570d5da56
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SqlDataset.pbtxt
@@ -0,0 +1,22 @@
+op {
+ graph_op_name: "SqlDataset"
+ in_arg {
+ name: "driver_name"
+ description: <<END
+The database type. Currently, the only supported type is 'sqlite'.
+END
+ }
+ in_arg {
+ name: "data_source_name"
+ description: <<END
+A connection string to connect to the database.
+END
+ }
+ in_arg {
+ name: "query"
+ description: <<END
+A SQL query to execute.
+END
+ }
+ summary: "Creates a dataset that executes a SQL query and emits rows of the result set."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Sqrt.pbtxt b/tensorflow/core/api_def/base_api/api_def_Sqrt.pbtxt
new file mode 100644
index 0000000000..857841dc41
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Sqrt.pbtxt
@@ -0,0 +1,7 @@
+op {
+ graph_op_name: "Sqrt"
+ summary: "Computes square root of x element-wise."
+ description: <<END
+I.e., \\(y = \sqrt{x} = x^{1/2}\\).
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SqrtGrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_SqrtGrad.pbtxt
new file mode 100644
index 0000000000..ac5b737f93
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SqrtGrad.pbtxt
@@ -0,0 +1,9 @@
+op {
+ graph_op_name: "SqrtGrad"
+ visibility: HIDDEN
+ summary: "Computes the gradient for the sqrt of `x` wrt its input."
+ description: <<END
+Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and `dy`
+is the corresponding input gradient.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Square.pbtxt b/tensorflow/core/api_def/base_api/api_def_Square.pbtxt
new file mode 100644
index 0000000000..c3e32a98d1
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Square.pbtxt
@@ -0,0 +1,7 @@
+op {
+ graph_op_name: "Square"
+ summary: "Computes square of x element-wise."
+ description: <<END
+I.e., \\(y = x * x = x^2\\).
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SquaredDifference.pbtxt b/tensorflow/core/api_def/base_api/api_def_SquaredDifference.pbtxt
new file mode 100644
index 0000000000..51277692d8
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SquaredDifference.pbtxt
@@ -0,0 +1,8 @@
+op {
+ graph_op_name: "SquaredDifference"
+ summary: "Returns (x - y)(x - y) element-wise."
+ description: <<END
+*NOTE*: `SquaredDifference` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Squeeze.pbtxt b/tensorflow/core/api_def/base_api/api_def_Squeeze.pbtxt
new file mode 100644
index 0000000000..f84c51536b
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Squeeze.pbtxt
@@ -0,0 +1,46 @@
+op {
+ graph_op_name: "Squeeze"
+ in_arg {
+ name: "input"
+ description: <<END
+The `input` to squeeze.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Contains the same data as `input`, but has one or more dimensions of
+size 1 removed.
+END
+ }
+ attr {
+ name: "squeeze_dims"
+ rename_to: "axis"
+ description: <<END
+If specified, only squeezes the dimensions listed. The dimension
+index starts at 0. It is an error to squeeze a dimension that is not 1. Must
+be in the range `[-rank(input), rank(input))`.
+END
+ }
+ summary: "Removes dimensions of size 1 from the shape of a tensor."
+ description: <<END
+Given a tensor `input`, this operation returns a tensor of the same type with
+all dimensions of size 1 removed. If you don't want to remove all size 1
+dimensions, you can remove specific size 1 dimensions by specifying
+`squeeze_dims`.
+
+For example:
+
+```
+# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
+shape(squeeze(t)) ==> [2, 3]
+```
+
+Or, to remove specific size 1 dimensions:
+
+```
+# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
+shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1]
+```
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Stack.pbtxt b/tensorflow/core/api_def/base_api/api_def_Stack.pbtxt
new file mode 100644
index 0000000000..3fd6682130
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Stack.pbtxt
@@ -0,0 +1,5 @@
+op {
+ graph_op_name: "Stack"
+ visibility: SKIP
+ summary: "Deprecated, use StackV2."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_StackClose.pbtxt b/tensorflow/core/api_def/base_api/api_def_StackClose.pbtxt
new file mode 100644
index 0000000000..050d69cbaa
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_StackClose.pbtxt
@@ -0,0 +1,5 @@
+op {
+ graph_op_name: "StackClose"
+ visibility: SKIP
+ summary: "Deprecated, use StackCloseV2."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_StackCloseV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_StackCloseV2.pbtxt
new file mode 100644
index 0000000000..d9b71fec3b
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_StackCloseV2.pbtxt
@@ -0,0 +1,11 @@
+op {
+ graph_op_name: "StackCloseV2"
+ visibility: SKIP
+ in_arg {
+ name: "handle"
+ description: <<END
+The handle to a stack.
+END
+ }
+ summary: "Delete the stack from its resource container."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_StackPop.pbtxt b/tensorflow/core/api_def/base_api/api_def_StackPop.pbtxt
new file mode 100644
index 0000000000..abf45f85cc
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_StackPop.pbtxt
@@ -0,0 +1,5 @@
+op {
+ graph_op_name: "StackPop"
+ visibility: SKIP
+ summary: "Deprecated, use StackPopV2."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_StackPopV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_StackPopV2.pbtxt
new file mode 100644
index 0000000000..7e0498dcf3
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_StackPopV2.pbtxt
@@ -0,0 +1,23 @@
+op {
+ graph_op_name: "StackPopV2"
+ visibility: SKIP
+ in_arg {
+ name: "handle"
+ description: <<END
+The handle to a stack.
+END
+ }
+ out_arg {
+ name: "elem"
+ description: <<END
+The tensor that is popped from the top of the stack.
+END
+ }
+ attr {
+ name: "elem_type"
+ description: <<END
+The type of the elem that is popped.
+END
+ }
+ summary: "Pop the element at the top of the stack."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_StackPush.pbtxt b/tensorflow/core/api_def/base_api/api_def_StackPush.pbtxt
new file mode 100644
index 0000000000..619f20f9aa
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_StackPush.pbtxt
@@ -0,0 +1,5 @@
+op {
+ graph_op_name: "StackPush"
+ visibility: SKIP
+ summary: "Deprecated, use StackPushV2."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_StackPushV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_StackPushV2.pbtxt
new file mode 100644
index 0000000000..83d7dd1f35
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_StackPushV2.pbtxt
@@ -0,0 +1,29 @@
+op {
+ graph_op_name: "StackPushV2"
+ visibility: SKIP
+ in_arg {
+ name: "handle"
+ description: <<END
+The handle to a stack.
+END
+ }
+ in_arg {
+ name: "elem"
+ description: <<END
+The tensor to be pushed onto the stack.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+The same tensor as the input 'elem'.
+END
+ }
+ attr {
+ name: "swap_memory"
+ description: <<END
+Swap `elem` to CPU. Defaults to false.
+END
+ }
+ summary: "Push an element onto the stack."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_StackV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_StackV2.pbtxt
new file mode 100644
index 0000000000..1699da1271
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_StackV2.pbtxt
@@ -0,0 +1,31 @@
+op {
+ graph_op_name: "StackV2"
+ visibility: SKIP
+ in_arg {
+ name: "max_size"
+ description: <<END
+The maximum size of the stack if non-negative. If negative, the stack
+size is unlimited.
+END
+ }
+ out_arg {
+ name: "handle"
+ description: <<END
+The handle to the stack.
+END
+ }
+ attr {
+ name: "elem_type"
+ description: <<END
+The type of the elements on the stack.
+END
+ }
+ attr {
+ name: "stack_name"
+ description: <<END
+Overrides the name used for the temporary stack resource. Default
+value is the name of the 'Stack' op (which is guaranteed unique).
+END
+ }
+ summary: "A stack that produces elements in first-in last-out order."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Stage.pbtxt b/tensorflow/core/api_def/base_api/api_def_Stage.pbtxt
new file mode 100644
index 0000000000..ba9b4bc461
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Stage.pbtxt
@@ -0,0 +1,42 @@
+op {
+ graph_op_name: "Stage"
+ in_arg {
+ name: "values"
+ description: <<END
+A list of tensors. The `dtypes` attr gives the data types that inserted
+values should adhere to.
+END
+ }
+ attr {
+ name: "capacity"
+ description: <<END
+Maximum number of elements in the Staging Area. If > 0, inserts
+on the container will block when the capacity is reached.
+END
+ }
+ attr {
+ name: "memory_limit"
+ description: <<END
+The maximum number of bytes allowed for Tensors in the Staging Area.
+If > 0, inserts will block until sufficient space is available.
+END
+ }
+ attr {
+ name: "container"
+ description: <<END
+If non-empty, this queue is placed in the given container. Otherwise,
+a default container is used.
+END
+ }
+ attr {
+ name: "shared_name"
+ description: <<END
+It is necessary to match this name to the matching Unstage Op.
+END
+ }
+ summary: "Stage values similar to a lightweight Enqueue."
+ description: <<END
+The basic functionality of this Op is similar to a queue with many
+fewer capabilities and options. This Op is optimized for performance.
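+
+A rough sketch of the staging pattern (the `tf.contrib.staging.StagingArea`
+wrapper shown here is an assumption; it stages with this op and unstages with
+`Unstage`):
+
+```python
+import tensorflow as tf
+
+area = tf.contrib.staging.StagingArea(dtypes=[tf.float32])
+
+x = tf.constant([1.0, 2.0])
+stage_op = area.put([x])  # enqueue-like insert
+vals = area.get()         # dequeue-like remove; run `stage_op` first
+```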
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_StageClear.pbtxt b/tensorflow/core/api_def/base_api/api_def_StageClear.pbtxt
new file mode 100644
index 0000000000..22cbe41090
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_StageClear.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "StageClear"
+ summary: "Op removes all elements in the underlying container."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_StagePeek.pbtxt b/tensorflow/core/api_def/base_api/api_def_StagePeek.pbtxt
new file mode 100644
index 0000000000..7eba72af2a
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_StagePeek.pbtxt
@@ -0,0 +1,9 @@
+op {
+ graph_op_name: "StagePeek"
+ summary: "Op peeks at the values at the specified index. If the"
+ description: <<END
+underlying container does not contain sufficient elements
+this op will block until it does. This Op is optimized for
+performance.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_StageSize.pbtxt b/tensorflow/core/api_def/base_api/api_def_StageSize.pbtxt
new file mode 100644
index 0000000000..7ae827d1b5
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_StageSize.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "StageSize"
+ summary: "Op returns the number of elements in the underlying container."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_StatelessRandomNormal.pbtxt b/tensorflow/core/api_def/base_api/api_def_StatelessRandomNormal.pbtxt
new file mode 100644
index 0000000000..b6ef8160e4
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_StatelessRandomNormal.pbtxt
@@ -0,0 +1,33 @@
+op {
+ graph_op_name: "StatelessRandomNormal"
+ in_arg {
+ name: "shape"
+ description: <<END
+The shape of the output tensor.
+END
+ }
+ in_arg {
+ name: "seed"
+ description: <<END
+2 seeds (shape [2]).
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Random values with specified shape.
+END
+ }
+ attr {
+ name: "dtype"
+ description: <<END
+The type of the output.
+END
+ }
+ summary: "Outputs deterministic pseudorandom values from a normal distribution."
+ description: <<END
+The generated values will have mean 0 and standard deviation 1.
+
+The outputs are a deterministic function of `shape` and `seed`.
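+
+For example (the `tf.contrib.stateless` module path is an assumption about
+where this op is exposed):
+
+```python
+import tensorflow as tf
+from tensorflow.contrib.stateless import stateless_random_normal
+
+# Same shape and seed, so `a` and `b` evaluate to identical values.
+a = stateless_random_normal(shape=[2, 3], seed=[7, 11])
+b = stateless_random_normal(shape=[2, 3], seed=[7, 11])
+```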
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_StatelessRandomUniform.pbtxt b/tensorflow/core/api_def/base_api/api_def_StatelessRandomUniform.pbtxt
new file mode 100644
index 0000000000..0ba88c3730
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_StatelessRandomUniform.pbtxt
@@ -0,0 +1,34 @@
+op {
+ graph_op_name: "StatelessRandomUniform"
+ in_arg {
+ name: "shape"
+ description: <<END
+The shape of the output tensor.
+END
+ }
+ in_arg {
+ name: "seed"
+ description: <<END
+2 seeds (shape [2]).
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Random values with specified shape.
+END
+ }
+ attr {
+ name: "dtype"
+ description: <<END
+The type of the output.
+END
+ }
+ summary: "Outputs deterministic pseudorandom random values from a uniform distribution."
+ description: <<END
+The generated values follow a uniform distribution in the range `[0, 1)`. The
+lower bound 0 is included in the range, while the upper bound 1 is excluded.
+
+The outputs are a deterministic function of `shape` and `seed`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_StatelessTruncatedNormal.pbtxt b/tensorflow/core/api_def/base_api/api_def_StatelessTruncatedNormal.pbtxt
new file mode 100644
index 0000000000..37228dba64
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_StatelessTruncatedNormal.pbtxt
@@ -0,0 +1,35 @@
+op {
+ graph_op_name: "StatelessTruncatedNormal"
+ in_arg {
+ name: "shape"
+ description: <<END
+The shape of the output tensor.
+END
+ }
+ in_arg {
+ name: "seed"
+ description: <<END
+2 seeds (shape [2]).
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Random values with specified shape.
+END
+ }
+ attr {
+ name: "dtype"
+ description: <<END
+The type of the output.
+END
+ }
+ summary: "Outputs deterministic pseudorandom values from a truncated normal distribution."
+ description: <<END
+The generated values follow a normal distribution with mean 0 and standard
+deviation 1, except that values whose magnitude is more than 2 standard
+deviations from the mean are dropped and re-picked.
+
+The outputs are a deterministic function of `shape` and `seed`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_StopGradient.pbtxt b/tensorflow/core/api_def/base_api/api_def_StopGradient.pbtxt
new file mode 100644
index 0000000000..af4b9f6113
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_StopGradient.pbtxt
@@ -0,0 +1,25 @@
+op {
+ graph_op_name: "StopGradient"
+ summary: "Stops gradient computation."
+ description: <<END
+When executed in a graph, this op outputs its input tensor as-is.
+
+When building ops to compute gradients, this op prevents the contribution of
+its inputs to be taken into account. Normally, the gradient generator adds ops
+to a graph to compute the derivatives of a specified 'loss' by recursively
+finding out inputs that contributed to its computation. If you insert this op
+in the graph, its inputs are masked from the gradient generator. They are not
+taken into account for computing gradients.
+
+This is useful any time you want to compute a value with TensorFlow but need
+to pretend that the value was a constant. Some examples include:
+
+* The *EM* algorithm where the *M-step* should not involve backpropagation
+ through the output of the *E-step*.
+* Contrastive divergence training of Boltzmann machines where, when
+ differentiating the energy function, the training must not backpropagate
+ through the graph that generated the samples from the model.
+* Adversarial training, where no backprop should happen through the adversarial
+ example generation process.
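+
+For example, a minimal sketch of the "pretend it is a constant" pattern:
+
+```python
+import tensorflow as tf
+
+x = tf.constant(3.0)
+y = tf.square(x)            # y = x^2 = 9
+
+# Block the gradient through y: loss looks like (constant) * x.
+loss = tf.stop_gradient(y) * x
+grad = tf.gradients(loss, x)  # [9.0], not 3 * x^2 = 27.0
+```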
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_StridedSlice.pbtxt b/tensorflow/core/api_def/base_api/api_def_StridedSlice.pbtxt
new file mode 100644
index 0000000000..8d6fc04847
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_StridedSlice.pbtxt
@@ -0,0 +1,167 @@
+op {
+ graph_op_name: "StridedSlice"
+ in_arg {
+ name: "begin"
+ description: <<END
+`begin[k]` specifies the offset into the `k`th range specification.
+The exact dimension this corresponds to will be determined by context.
+Out-of-bounds values will be silently clamped. If the `k`th bit of
+`begin_mask` is set, then `begin[k]` is ignored and the full range of the
+appropriate dimension is used instead. Negative values cause indexing
+to start from the highest element, e.g. if `foo==[1,2,3]` then `foo[-1]==3`.
+END
+ }
+ in_arg {
+ name: "end"
+ description: <<END
+`end[i]` is like `begin` with the exception that `end_mask` is
+used to determine full ranges.
+END
+ }
+ in_arg {
+ name: "strides"
+ description: <<END
+`strides[i]` specifies the increment in the `i`th specification
+after extracting a given element. Negative indices will reverse
+the original order. Out-of-range values are
+clamped to `[0,dim[i])` if `slice[i] > 0` or `[-1,dim[i]-1]` if `slice[i] < 0`.
+END
+ }
+ attr {
+ name: "begin_mask"
+ description: <<END
+a bitmask where a bit i being 1 means to ignore the begin
+value and instead use the largest interval possible. At runtime
+begin[i] will be replaced with `[0, n-1)` if `stride[i] > 0` or
+`[-1, n-1]` if `stride[i] < 0`
+END
+ }
+ attr {
+ name: "end_mask"
+ description: <<END
+analogous to `begin_mask`
+END
+ }
+ attr {
+ name: "ellipsis_mask"
+ description: <<END
+a bitmask where bit `i` being 1 means the `i`th
+position is actually an ellipsis. One bit at most can be 1.
+If `ellipsis_mask == 0`, then an implicit ellipsis mask of `1 << (m+1)`
+is provided. This means that `foo[3:5] == foo[3:5, ...]`. An ellipsis
+implicitly creates as many range specifications as necessary to fully
+specify the sliced range for every dimension. For example for a 4-dimensional
+tensor `foo` the slice `foo[2, ..., 5:8]` implies `foo[2, :, :, 5:8]`.
+END
+ }
+ attr {
+ name: "new_axis_mask"
+ description: <<END
+a bitmask where bit `i` being 1 means the `i`th
+specification creates a new shape 1 dimension. For example
+`foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor.
+END
+ }
+ attr {
+ name: "shrink_axis_mask"
+ description: <<END
+a bitmask where bit `i` implies that the `i`th
+specification should shrink the dimensionality. begin and end
+must imply a slice of size 1 in the dimension. For example in
+python one might do `foo[:, 3, :]` which would result in
+`shrink_axis_mask` being 2.
+END
+ }
+ summary: "Return a strided slice from `input`."
+ description: <<END
+Note, most python users will want to use the Python `Tensor.__getitem__`
+or `Variable.__getitem__` rather than this op directly.
+
+The goal of this op is to produce a new tensor with a subset of
+the elements from the `n` dimensional `input` tensor. The subset is chosen using
+a sequence of `m` sparse range specifications encoded into the arguments
+of this function. Note, in some cases
+`m` could be equal to `n`, but this need not be the case. Each
+range specification entry can be one of the following:
+
+- An ellipsis (...). Ellipses are used to imply zero or more
+ dimensions of full-dimension selection and are produced using
+ `ellipsis_mask`. For example, `foo[...]` is the identity slice.
+
+- A new axis. This is used to insert a new shape=1 dimension and is
+ produced using `new_axis_mask`. For example, `foo[tf.newaxis, ...]` where
+ `foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor.
+
+
+- A range `begin:end:stride`. This is used to specify how much to choose from
+ a given dimension. `stride` can be any integer but 0. `begin` is an integer
+ which represents the index of the first value to select while `end` represents
+ the index of the last value to select. The number of values selected in each
+ dimension is `end - begin` if `stride > 0` and `begin - end` if `stride < 0`.
+ `begin` and `end` can be negative where `-1` is the last element, `-2` is
+ the second to last. `begin_mask` controls whether to replace the explicitly
+ given `begin` with an implicit effective value of `0` if `stride > 0` and
+ `-1` if `stride < 0`. `end_mask` is analogous but produces the number
+ required to create the largest open interval. For example, given a shape
+ `(3,)` tensor `foo[:]`, the effective `begin` and `end` are `0` and `3`. Do
+ not assume this is equivalent to `foo[0:-1]` which has an effective `begin`
+ and `end` of `0` and `2`. Another example is `foo[-2::-1]` which reverses the
+ first dimension of a tensor while dropping its last element (in the original
+ order). For example `foo = [1,2,3,4]; foo[-2::-1]` is `[3,2,1]`.
+
+- A single index. This is used to keep only elements that have a given
+ index. For example, `foo[2, :]` on a shape `(5,6)` tensor produces a
+ shape `(6,)` tensor. This is encoded in `begin` and `end` and
+ `shrink_axis_mask`.
+
+Each conceptual range specification is encoded in the op's arguments. This
+encoding is best understood by considering a non-trivial example. In
+particular,
+`foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as
+
+```
+begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0)
+end = [2, 4, x, x, -3, x]
+strides = [1, 1, x, x, -1, 1]
+begin_mask = 1<<4 | 1 << 5 = 48
+end_mask = 1<<5 = 32
+ellipsis_mask = 1<<3 = 8
+new_axis_mask = 1<<2 = 4
+shrink_axis_mask = 1<<0 = 1
+```
+
+In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of
+the slice becomes (2, 1, 5, 5, 2, 5).
+Let us walk step by step through each argument specification.
+
+1. The first argument in the example slice is turned into `begin = 1` and
+`end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we
+also set the appropriate bit in `shrink_axis_mask`.
+
+2. `2:4` contributes 2, 4, 1 to begin, end, and stride. All masks have
+zero bits contributed.
+
+3. None is a synonym for `tf.newaxis`. This means insert a dimension of size 1
+in the final shape. Dummy values are contributed to begin,
+end and stride, while the new_axis_mask bit is set.
+
+4. `...` grabs the full ranges from as many dimensions as needed to
+fully specify a slice for every dimension of the input shape.
+
+5. `:-3:-1` shows the use of negative indices. A negative index `i` associated
+with a dimension that has shape `s` is converted to a positive index
+`s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion
+is done internally so begin, end and strides receive x, -3, and -1.
+The appropriate begin_mask bit is set to indicate the start range is the
+full range (ignoring the x).
+
+6. `:` indicates that the entire contents of the corresponding dimension
+is selected. This is equivalent to `::` or `0::1`. begin, end, and strides
+receive 0, 0, and 1, respectively. The appropriate bits in `begin_mask` and
+`end_mask` are also set.
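+
+As a concrete sketch, the same slice expressed directly through the Python
+wrapper (`tf.strided_slice` is assumed to be the public endpoint for this op;
+the mask values follow the encoding above):
+
+```python
+import tensorflow as tf
+
+foo = tf.ones([5, 5, 5, 5, 5, 5])
+
+# Equivalent to foo[1, 2:4, None, ..., :-3:-1, :];
+# result shape is (2, 1, 5, 5, 2, 5).
+sliced = tf.strided_slice(foo,
+                          begin=[1, 2, 0, 0, 0, 0],
+                          end=[2, 4, 0, 0, -3, 0],
+                          strides=[1, 1, 1, 1, -1, 1],
+                          begin_mask=48, end_mask=32, ellipsis_mask=8,
+                          new_axis_mask=4, shrink_axis_mask=1)
+```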
+
+*Requirements*:
+ `0 != strides[i] for i in [0, m)`
+ `ellipsis_mask must be a power of two (only one ellipsis)`
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_StridedSliceAssign.pbtxt b/tensorflow/core/api_def/base_api/api_def_StridedSliceAssign.pbtxt
new file mode 100644
index 0000000000..0fc89576ad
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_StridedSliceAssign.pbtxt
@@ -0,0 +1,12 @@
+op {
+ graph_op_name: "StridedSliceAssign"
+ summary: "Assign `value` to the sliced l-value reference of `ref`."
+ description: <<END
+The values of `value` are assigned to the positions in the variable
+`ref` that are selected by the slice parameters. The slice parameters
+`begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`.
+
+NOTE this op currently does not support broadcasting and so `value`'s
+shape must be exactly the shape produced by the slice of `ref`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_StridedSliceGrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_StridedSliceGrad.pbtxt
new file mode 100644
index 0000000000..c5ea059e8a
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_StridedSliceGrad.pbtxt
@@ -0,0 +1,14 @@
+op {
+ graph_op_name: "StridedSliceGrad"
+ summary: "Returns the gradient of `StridedSlice`."
+ description: <<END
+Since `StridedSlice` cuts out pieces of its `input`, which has size
+`shape`, its gradient will have the same shape (which is passed here
+as `shape`). The gradient will be zero in any element that the slice
+does not select.
+
+Arguments are the same as `StridedSlice` with the exception that
+`dy` is the input gradient to be propagated and `shape` is the
+shape of `StridedSlice`'s `input`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_StringJoin.pbtxt b/tensorflow/core/api_def/base_api/api_def_StringJoin.pbtxt
new file mode 100644
index 0000000000..549ee43413
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_StringJoin.pbtxt
@@ -0,0 +1,21 @@
+op {
+ graph_op_name: "StringJoin"
+ in_arg {
+ name: "inputs"
+ description: <<END
+A list of string tensors. The tensors must all have the same shape,
+or be scalars. Scalars may be mixed in; these will be broadcast to the shape
+of non-scalar inputs.
+END
+ }
+ attr {
+ name: "separator"
+ description: <<END
+string, an optional join separator.
+END
+ }
+ summary: "Joins the strings in the given list of string tensors into one tensor;"
+ description: <<END
+with the given separator (default is an empty separator).
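+
+For example (assuming the `tf.string_join` endpoint):
+
+```python
+import tensorflow as tf
+
+a = tf.constant(["hello", "tensor"])
+b = tf.constant(["world", "flow"])
+
+# Element-wise join: ["hello world", "tensor flow"].
+joined = tf.string_join([a, b], separator=" ")
+```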
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_StringSplit.pbtxt b/tensorflow/core/api_def/base_api/api_def_StringSplit.pbtxt
new file mode 100644
index 0000000000..4792f298ee
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_StringSplit.pbtxt
@@ -0,0 +1,64 @@
+op {
+ graph_op_name: "StringSplit"
+ in_arg {
+ name: "input"
+ description: <<END
+1-D. Strings to split.
+END
+ }
+ in_arg {
+ name: "delimiter"
+ description: <<END
+0-D. Delimiter characters (bytes), or empty string.
+END
+ }
+ out_arg {
+ name: "indices"
+ description: <<END
+A dense matrix of int64 representing the indices of the sparse tensor.
+END
+ }
+ out_arg {
+ name: "values"
+ description: <<END
+A vector of strings corresponding to the split values.
+END
+ }
+ out_arg {
+ name: "shape"
+ description: <<END
+a length-2 vector of int64 representing the shape of the sparse
+tensor, where the first value is N and the second value is the maximum number
+of tokens in a single input entry.
+END
+ }
+ attr {
+ name: "skip_empty"
+ description: <<END
+A `bool`. If `True`, skip the empty strings from the result.
+END
+ }
+ summary: "Split elements of `input` based on `delimiter` into a `SparseTensor`."
+ description: <<END
+Let N be the size of source (typically N will be the batch size). Split each
+element of `input` based on `delimiter` and return a `SparseTensor`
+containing the split tokens. Empty tokens are ignored.
+
+`delimiter` can be empty, or a string of split characters. If `delimiter` is an
+ empty string, each element of `input` is split into individual single-byte
+ character strings, including splitting of UTF-8 multibyte sequences. Otherwise
+ every character of `delimiter` is a potential split point.
+
+For example:
+ N = 2, input[0] is 'hello world' and input[1] is 'a b c', then the output
+ will be
+
+ indices = [0, 0;
+ 0, 1;
+ 1, 0;
+ 1, 1;
+ 1, 2]
+ shape = [2, 3]
+ values = ['hello', 'world', 'a', 'b', 'c']
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_StringToHashBucket.pbtxt b/tensorflow/core/api_def/base_api/api_def_StringToHashBucket.pbtxt
new file mode 100644
index 0000000000..af49dbd161
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_StringToHashBucket.pbtxt
@@ -0,0 +1,24 @@
+op {
+ graph_op_name: "StringToHashBucket"
+ out_arg {
+ name: "output"
+ description: <<END
+A Tensor of the same shape as the input `string_tensor`.
+END
+ }
+ attr {
+ name: "num_buckets"
+ description: <<END
+The number of buckets.
+END
+ }
+ summary: "Converts each string in the input Tensor to its hash mod by a number of buckets."
+ description: <<END
+The hash function is deterministic on the content of the string within the
+process.
+
+Note that the hash function may change from time to time.
+This functionality will be deprecated and it's recommended to use
+`tf.string_to_hash_bucket_fast()` or `tf.string_to_hash_bucket_strong()`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_StringToHashBucketFast.pbtxt b/tensorflow/core/api_def/base_api/api_def_StringToHashBucketFast.pbtxt
new file mode 100644
index 0000000000..a68d54a534
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_StringToHashBucketFast.pbtxt
@@ -0,0 +1,30 @@
+op {
+ graph_op_name: "StringToHashBucketFast"
+ in_arg {
+ name: "input"
+ description: <<END
+The strings to assign a hash bucket.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+A Tensor of the same shape as the input `string_tensor`.
+END
+ }
+ attr {
+ name: "num_buckets"
+ description: <<END
+The number of buckets.
+END
+ }
+ summary: "Converts each string in the input Tensor to its hash mod by a number of buckets."
+ description: <<END
+The hash function is deterministic on the content of the string within the
+process and will never change. However, it is not suitable for cryptography.
+This function may be used when CPU time is scarce and inputs are trusted or
+unimportant. There is a risk of adversaries constructing inputs that all hash
+to the same bucket. To prevent this problem, use a strong hash function with
+`tf.string_to_hash_bucket_strong`.
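+
+For example (assuming the `tf.string_to_hash_bucket_fast` endpoint):
+
+```python
+import tensorflow as tf
+
+words = tf.constant(["apple", "banana", "apple"])
+
+# Deterministic bucket ids in [0, 10); equal strings get equal ids.
+buckets = tf.string_to_hash_bucket_fast(words, num_buckets=10)
+```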
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_StringToHashBucketStrong.pbtxt b/tensorflow/core/api_def/base_api/api_def_StringToHashBucketStrong.pbtxt
new file mode 100644
index 0000000000..b63fbd1ff9
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_StringToHashBucketStrong.pbtxt
@@ -0,0 +1,41 @@
+op {
+ graph_op_name: "StringToHashBucketStrong"
+ in_arg {
+ name: "input"
+ description: <<END
+The strings to assign a hash bucket.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+A Tensor of the same shape as the input `string_tensor`.
+END
+ }
+ attr {
+ name: "num_buckets"
+ description: <<END
+The number of buckets.
+END
+ }
+ attr {
+ name: "key"
+ description: <<END
+The key for the keyed hash function passed as a list of two uint64
+elements.
+END
+ }
+ summary: "Converts each string in the input Tensor to its hash mod by a number of buckets."
+ description: <<END
+The hash function is deterministic on the content of the string within the
+process. The hash function is a keyed hash function, where attribute `key`
+defines the key of the hash function. `key` is an array of 2 elements.
+
+A strong hash is important when inputs may be malicious, e.g. URLs with
+additional components. Adversaries could try to make their inputs hash to the
+same bucket for a denial-of-service attack or to skew the results. A strong
+hash prevents this by making it difficult, if not infeasible, to compute inputs
+that hash to the same bucket. This comes at a cost of roughly 4x higher compute
+time than `tf.string_to_hash_bucket_fast`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_StringToNumber.pbtxt b/tensorflow/core/api_def/base_api/api_def_StringToNumber.pbtxt
new file mode 100644
index 0000000000..e6e0b1dc13
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_StringToNumber.pbtxt
@@ -0,0 +1,20 @@
+op {
+ graph_op_name: "StringToNumber"
+ out_arg {
+ name: "output"
+ description: <<END
+A Tensor of the same shape as the input `string_tensor`.
+END
+ }
+ attr {
+ name: "out_type"
+ description: <<END
+The numeric type to interpret each string in `string_tensor` as.
+END
+ }
+ summary: "Converts each string in the input Tensor to the specified numeric type."
+ description: <<END
+(Note that int32 overflow results in an error while float overflow
+results in a rounded value.)
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Sub.pbtxt b/tensorflow/core/api_def/base_api/api_def_Sub.pbtxt
new file mode 100644
index 0000000000..73b82d6ac8
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Sub.pbtxt
@@ -0,0 +1,14 @@
+op {
+ graph_op_name: "Sub"
+ endpoint {
+ name: "Subtract"
+ }
+ endpoint {
+ name: "Sub"
+ }
+ summary: "Returns x - y element-wise."
+ description: <<END
+*NOTE*: `Sub` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Substr.pbtxt b/tensorflow/core/api_def/base_api/api_def_Substr.pbtxt
new file mode 100644
index 0000000000..8fc1e5cba3
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Substr.pbtxt
@@ -0,0 +1,103 @@
+op {
+ graph_op_name: "Substr"
+ in_arg {
+ name: "input"
+ description: <<END
+Tensor of strings
+END
+ }
+ in_arg {
+ name: "pos"
+ description: <<END
+Scalar defining the position of first character in each substring
+END
+ }
+ in_arg {
+ name: "len"
+ description: <<END
+Scalar defining the number of characters to include in each substring
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Tensor of substrings
+END
+ }
+ summary: "Return substrings from `Tensor` of strings."
+ description: <<END
+For each string in the input `Tensor`, creates a substring starting at index
+`pos` with a total length of `len`.
+
+If `len` defines a substring that would extend beyond the length of the input
+string, then as many characters as possible are used.
+
+If `pos` is negative or specifies a character index larger than any of the input
+strings, then an `InvalidArgumentError` is thrown.
+
+`pos` and `len` must have the same shape, otherwise a `ValueError` is thrown on
+Op creation.
+
+*NOTE*: `Substr` supports broadcasting up to two dimensions. More about
+broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+
+---
+
+Examples
+
+Using scalar `pos` and `len`:
+
+```python
+input = [b'Hello', b'World']
+position = 1
+length = 3
+
+output = [b'ell', b'orl']
+```
+
+Using `pos` and `len` with same shape as `input`:
+
+```python
+input = [[b'ten', b'eleven', b'twelve'],
+ [b'thirteen', b'fourteen', b'fifteen'],
+ [b'sixteen', b'seventeen', b'eighteen']]
+position = [[1, 2, 3],
+ [1, 2, 3],
+ [1, 2, 3]]
+length = [[2, 3, 4],
+ [4, 3, 2],
+ [5, 5, 5]]
+
+output = [[b'en', b'eve', b'lve'],
+ [b'hirt', b'urt', b'te'],
+ [b'ixtee', b'vente', b'hteen']]
+```
+
+Broadcasting `pos` and `len` onto `input`:
+
+```
+input = [[b'ten', b'eleven', b'twelve'],
+ [b'thirteen', b'fourteen', b'fifteen'],
+ [b'sixteen', b'seventeen', b'eighteen'],
+ [b'nineteen', b'twenty', b'twentyone']]
+position = [1, 2, 3]
+length = [1, 2, 3]
+
+output = [[b'e', b'ev', b'lve'],
+ [b'h', b'ur', b'tee'],
+ [b'i', b've', b'hte'],
+ [b'i', b'en', b'nty']]
+```
+
+Broadcasting `input` onto `pos` and `len`:
+
+```
+input = b'thirteen'
+position = [1, 5, 7]
+length = [3, 2, 1]
+
+output = [b'hir', b'ee', b'n']
+```
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Sum.pbtxt b/tensorflow/core/api_def/base_api/api_def_Sum.pbtxt
new file mode 100644
index 0000000000..295d5b86c0
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Sum.pbtxt
@@ -0,0 +1,42 @@
+op {
+ graph_op_name: "Sum"
+ endpoint {
+ name: "Sum"
+ }
+ endpoint {
+ name: "ReduceSum"
+ }
+ in_arg {
+ name: "input"
+ description: <<END
+The tensor to reduce.
+END
+ }
+ in_arg {
+ name: "reduction_indices"
+ rename_to: "axis"
+ description: <<END
+The dimensions to reduce. Must be in the range
+`[-rank(input), rank(input))`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+The reduced tensor.
+END
+ }
+ attr {
+ name: "keep_dims"
+ description: <<END
+If true, retain reduced dimensions with length 1.
+END
+ }
+ summary: "Computes the sum of elements across dimensions of a tensor."
+ description: <<END
+Reduces `input` along the dimensions given in `reduction_indices`. Unless
+`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
+`reduction_indices`. If `keep_dims` is true, the reduced dimensions are
+retained with length 1.
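+
+For example, via the `ReduceSum` endpoint (`tf.reduce_sum` in Python):
+
+```python
+import tensorflow as tf
+
+x = tf.constant([[1, 1, 1],
+                 [1, 1, 1]])
+
+tf.reduce_sum(x)                          # 6
+tf.reduce_sum(x, axis=0)                  # [2, 2, 2]
+tf.reduce_sum(x, axis=1, keep_dims=True)  # [[3], [3]]
+```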
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Svd.pbtxt b/tensorflow/core/api_def/base_api/api_def_Svd.pbtxt
new file mode 100644
index 0000000000..3ec746a117
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Svd.pbtxt
@@ -0,0 +1,62 @@
+op {
+ graph_op_name: "Svd"
+ in_arg {
+ name: "input"
+ description: <<END
+A tensor of shape `[..., M, N]` whose inner-most 2 dimensions
+form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.
+END
+ }
+ out_arg {
+ name: "s"
+ description: <<END
+Singular values. Shape is `[..., P]`.
+END
+ }
+ out_arg {
+ name: "u"
+ description: <<END
+Left singular vectors. If `full_matrices` is `False` then shape is
+`[..., M, P]`; if `full_matrices` is `True` then shape is
+`[..., M, M]`. Undefined if `compute_uv` is `False`.
+END
+ }
+ out_arg {
+ name: "v"
+ description: <<END
+Right singular vectors. If `full_matrices` is `False` then shape is
+`[..., N, P]`. If `full_matrices` is `True` then shape is `[..., N, N]`.
+Undefined if `compute_uv` is false.
+END
+ }
+ attr {
+ name: "compute_uv"
+ description: <<END
+If true, left and right singular vectors will be
+computed and returned in `u` and `v`, respectively.
+If false, `u` and `v` are not set and should never be referenced.
+END
+ }
+ attr {
+ name: "full_matrices"
+ description: <<END
+If true, compute full-sized `u` and `v`. If false
+(the default), compute only the leading `P` singular vectors.
+Ignored if `compute_uv` is `False`.
+END
+ }
+ summary: "Computes the singular value decompositions of one or more matrices."
+ description: <<END
+Computes the SVD of each inner matrix in `input` such that
+`input[..., :, :] = u[..., :, :] * diag(s[..., :]) * transpose(v[..., :, :])`
+
+```python
+# a is a tensor containing a batch of matrices.
+# s is a tensor of singular values for each matrix.
+# u is the tensor containing the left singular vectors for each matrix.
+# v is the tensor containing the right singular vectors for each matrix.
+s, u, v = svd(a)
+s, _, _ = svd(a, compute_uv=False)
+```
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Switch.pbtxt b/tensorflow/core/api_def/base_api/api_def_Switch.pbtxt
new file mode 100644
index 0000000000..5b9206df74
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Switch.pbtxt
@@ -0,0 +1,34 @@
+op {
+ graph_op_name: "Switch"
+ in_arg {
+ name: "data"
+ description: <<END
+The tensor to be forwarded to the appropriate output.
+END
+ }
+ in_arg {
+ name: "pred"
+ description: <<END
+A scalar that specifies which output port will receive data.
+END
+ }
+ out_arg {
+ name: "output_false"
+ description: <<END
+If `pred` is false, data will be forwarded to this output.
+END
+ }
+ out_arg {
+ name: "output_true"
+ description: <<END
+If `pred` is true, data will be forwarded to this output.
+END
+ }
+ summary: "Forwards `data` to the output port determined by `pred`."
+ description: <<END
+If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise,
+the data goes to `output_false`.
+
+See also `RefSwitch` and `Merge`.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_SymbolicGradient.pbtxt b/tensorflow/core/api_def/base_api/api_def_SymbolicGradient.pbtxt
new file mode 100644
index 0000000000..b5cb6dbc12
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_SymbolicGradient.pbtxt
@@ -0,0 +1,51 @@
+op {
+ graph_op_name: "SymbolicGradient"
+ in_arg {
+ name: "input"
+ description: <<END
+a list of input tensors of size N + M;
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+a list of output tensors of size N;
+END
+ }
+ attr {
+ name: "Tin"
+ description: <<END
+the type list for the input list.
+END
+ }
+ attr {
+ name: "Tout"
+ description: <<END
+the type list for the output list.
+END
+ }
+ attr {
+ name: "f"
+ description: <<END
+The function we want to compute the gradient for.
+
+The function 'f' must be a numerical function which takes N inputs and
+produces M outputs. Its gradient function 'g', which is computed by
+this SymbolicGradient op is a function taking N + M inputs and
+produces N outputs.
+
+I.e. if we have
+ (y1, y2, ..., y_M) = f(x1, x2, ..., x_N),
+then, g is
+ (dL/dx1, dL/dx2, ..., dL/dx_N) = g(x1, x2, ..., x_N,
+ dL/dy1, dL/dy2, ..., dL/dy_M),
+
+where L is a scalar-value function of (x1, x2, ..., xN) (e.g., the
+loss function). dL/dx_i is the partial derivative of L with respect
+to x_i.
+END
+ }
+ summary: "Computes the gradient function for function f via backpropagation."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_T.pbtxt b/tensorflow/core/api_def/base_api/api_def_T.pbtxt
deleted file mode 100644
index 8d1cbbcc06..0000000000
--- a/tensorflow/core/api_def/base_api/api_def_T.pbtxt
+++ /dev/null
@@ -1,619 +0,0 @@
-op {
- graph_op_name: "TFRecordDataset"
- endpoint {
- name: "TFRecordDataset"
- }
- summary: "Creates a dataset that emits the records from one or more TFRecord files."
-}
-op {
- graph_op_name: "TFRecordReader"
- endpoint {
- name: "TFRecordReader"
- }
- summary: "A Reader that outputs the records from a TensorFlow Records file."
-}
-op {
- graph_op_name: "TFRecordReaderV2"
- endpoint {
- name: "TFRecordReaderV2"
- }
- summary: "A Reader that outputs the records from a TensorFlow Records file."
-}
-op {
- graph_op_name: "TakeDataset"
- endpoint {
- name: "TakeDataset"
- }
- summary: "Creates a dataset that contains `count` elements from the `input_dataset`."
-}
-op {
- graph_op_name: "TakeManySparseFromTensorsMap"
- endpoint {
- name: "TakeManySparseFromTensorsMap"
- }
- summary: "Read `SparseTensors` from a `SparseTensorsMap` and concatenate them."
- description: <<END
-The input `sparse_handles` must be an `int64` matrix of shape `[N, 1]` where
-`N` is the minibatch size and the rows correspond to the output handles of
-`AddSparseToTensorsMap` or `AddManySparseToTensorsMap`. The ranks of the
-original `SparseTensor` objects that went into the given input ops must all
-match. When the final `SparseTensor` is created, it has rank one
-higher than the ranks of the incoming `SparseTensor` objects
-(they have been concatenated along a new row dimension on the left).
-
-The output `SparseTensor` object's shape values for all dimensions but the
-first are the max across the input `SparseTensor` objects' shape values
-for the corresponding dimensions. Its first shape value is `N`, the minibatch
-size.
-
-The input `SparseTensor` objects' indices are assumed ordered in
-standard lexicographic order. If this is not the case, after this
-step run `SparseReorder` to restore index ordering.
-
-For example, if the handles represent an input, which is a `[2, 3]` matrix
-representing two original `SparseTensor` objects:
-
-```
- index = [ 0]
- [10]
- [20]
- values = [1, 2, 3]
- shape = [50]
-```
-
-and
-
-```
- index = [ 2]
- [10]
- values = [4, 5]
- shape = [30]
-```
-
-then the final `SparseTensor` will be:
-
-```
- index = [0 0]
- [0 10]
- [0 20]
- [1 2]
- [1 10]
- values = [1, 2, 3, 4, 5]
- shape = [2 50]
-```
-END
-}
-op {
- graph_op_name: "Tan"
- endpoint {
- name: "Tan"
- }
- summary: "Computes tan of x element-wise."
-}
-op {
- graph_op_name: "Tanh"
- endpoint {
- name: "Tanh"
- }
- summary: "Computes hyperbolic tangent of `x` element-wise."
-}
-op {
- graph_op_name: "TanhGrad"
- endpoint {
- name: "TanhGrad"
- }
- summary: "Computes the gradient for the tanh of `x` wrt its input."
- description: <<END
-Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and `dy`
-is the corresponding input gradient.
-END
-}
-op {
- graph_op_name: "TemporaryVariable"
- endpoint {
- name: "TemporaryVariable"
- }
- summary: "Returns a tensor that may be mutated, but only persists within a single step."
- description: <<END
-This is an experimental op for internal use only and it is possible to use this
-op in unsafe ways. DO NOT USE unless you fully understand the risks.
-
-It is the caller's responsibility to ensure that 'ref' is eventually passed to a
-matching 'DestroyTemporaryVariable' op after all other uses have completed.
-
-Outputs a ref to the tensor state so it may be read or modified.
-
- E.g.
- var = state_ops._temporary_variable([1, 2], types.float_)
- var_name = var.op.name
- var = state_ops.assign(var, [[4.0, 5.0]])
- var = state_ops.assign_add(var, [[6.0, 7.0]])
- final = state_ops._destroy_temporary_variable(var, var_name=var_name)
-END
-}
-op {
- graph_op_name: "TensorArray"
- endpoint {
- name: "TensorArray"
- }
-}
-op {
- graph_op_name: "TensorArrayClose"
- endpoint {
- name: "TensorArrayClose"
- }
-}
-op {
- graph_op_name: "TensorArrayCloseV2"
- endpoint {
- name: "TensorArrayCloseV2"
- }
- summary: "Deprecated. Use TensorArrayCloseV3"
-}
-op {
- graph_op_name: "TensorArrayCloseV3"
- endpoint {
- name: "TensorArrayCloseV3"
- }
- summary: "Delete the TensorArray from its resource container."
- description: <<END
-This enables the user to close and release the resource in the middle
-of a step/run.
-END
-}
-op {
- graph_op_name: "TensorArrayConcat"
- endpoint {
- name: "TensorArrayConcat"
- }
-}
-op {
- graph_op_name: "TensorArrayConcatV2"
- endpoint {
- name: "TensorArrayConcatV2"
- }
- summary: "Deprecated. Use TensorArrayConcatV3"
-}
-op {
- graph_op_name: "TensorArrayConcatV3"
- endpoint {
- name: "TensorArrayConcatV3"
- }
- summary: "Concat the elements from the TensorArray into value `value`."
- description: <<END
-Takes `T` elements of shapes
-
- ```
- (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...)
- ```
-
-and concatenates them into a Tensor of shape:
-
- ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```
-
-All elements must have the same shape (excepting the first dimension).
-END
-}
-op {
- graph_op_name: "TensorArrayGather"
- endpoint {
- name: "TensorArrayGather"
- }
-}
-op {
- graph_op_name: "TensorArrayGatherV2"
- endpoint {
- name: "TensorArrayGatherV2"
- }
- summary: "Deprecated. Use TensorArrayGatherV3"
-}
-op {
- graph_op_name: "TensorArrayGatherV3"
- endpoint {
- name: "TensorArrayGatherV3"
- }
- summary: "Gather specific elements from the TensorArray into output `value`."
- description: <<END
-All elements selected by `indices` must have the same shape.
-END
-}
-op {
- graph_op_name: "TensorArrayGrad"
- endpoint {
- name: "TensorArrayGrad"
- }
-}
-op {
- graph_op_name: "TensorArrayGradV2"
- endpoint {
- name: "TensorArrayGradV2"
- }
- summary: "Deprecated. Use TensorArrayGradV3"
-}
-op {
- graph_op_name: "TensorArrayGradV3"
- endpoint {
- name: "TensorArrayGradV3"
- }
- summary: "Creates a TensorArray for storing the gradients of values in the given handle."
- description: <<END
-If the given TensorArray gradient already exists, returns a reference to it.
-
-Locks the size of the original TensorArray by disabling its dynamic size flag.
-
-**A note about the input flow_in:**
-
-The handle flow_in forces the execution of the gradient lookup to occur
-only after certain other operations have occurred. For example, when
-the forward TensorArray is dynamically sized, writes to this TensorArray
-may resize the object. The gradient TensorArray is statically sized based
-on the size of the forward TensorArray when this operation executes.
-Furthermore, the size of the forward TensorArray is frozen by this call.
-As a result, the flow is used to ensure that the call to generate the gradient
-TensorArray only happens after all writes are executed.
-
-In the case of dynamically sized TensorArrays, gradient computation should
-only be performed on read operations that have themselves been chained via
-flow to occur only after all writes have executed. That way the final size
-of the forward TensorArray is known when this operation is called.
-
-**A note about the source attribute:**
-
-TensorArray gradient calls use an accumulator TensorArray object. If
-multiple gradients are calculated and run in the same session, the multiple
-gradient nodes may accidentally flow through the same accumulator TensorArray.
-This double counts and generally breaks the TensorArray gradient flow.
-
-The solution is to identify which gradient call this particular
-TensorArray gradient is being called in. This is performed by identifying
-a unique string (e.g. "gradients", "gradients_1", ...) from the input
-gradient Tensor's name. This string is used as a suffix when creating
-the TensorArray gradient object here (the attribute `source`).
-
-The attribute `source` is added as a suffix to the forward TensorArray's
-name when performing the creation / lookup, so that each separate gradient
-calculation gets its own TensorArray accumulator.
-END
-}
-op {
- graph_op_name: "TensorArrayPack"
- endpoint {
- name: "TensorArrayPack"
- }
-}
-op {
- graph_op_name: "TensorArrayRead"
- endpoint {
- name: "TensorArrayRead"
- }
-}
-op {
- graph_op_name: "TensorArrayReadV2"
- endpoint {
- name: "TensorArrayReadV2"
- }
- summary: "Deprecated. Use TensorArrayReadV3"
-}
-op {
- graph_op_name: "TensorArrayReadV3"
- endpoint {
- name: "TensorArrayReadV3"
- }
- summary: "Read an element from the TensorArray into output `value`."
-}
-op {
- graph_op_name: "TensorArrayScatter"
- endpoint {
- name: "TensorArrayScatter"
- }
-}
-op {
- graph_op_name: "TensorArrayScatterV2"
- endpoint {
- name: "TensorArrayScatterV2"
- }
- summary: "Deprecated. Use TensorArrayScatterV3"
-}
-op {
- graph_op_name: "TensorArrayScatterV3"
- endpoint {
- name: "TensorArrayScatterV3"
- }
- summary: "Scatter the data from the input value into specific TensorArray elements."
- description: <<END
-`indices` must be a vector, its length must match the first dim of `value`.
-END
-}
-op {
- graph_op_name: "TensorArraySize"
- endpoint {
- name: "TensorArraySize"
- }
-}
-op {
- graph_op_name: "TensorArraySizeV2"
- endpoint {
- name: "TensorArraySizeV2"
- }
- summary: "Deprecated. Use TensorArraySizeV3"
-}
-op {
- graph_op_name: "TensorArraySizeV3"
- endpoint {
- name: "TensorArraySizeV3"
- }
- summary: "Get the current size of the TensorArray."
-}
-op {
- graph_op_name: "TensorArraySplit"
- endpoint {
- name: "TensorArraySplit"
- }
-}
-op {
- graph_op_name: "TensorArraySplitV2"
- endpoint {
- name: "TensorArraySplitV2"
- }
- summary: "Deprecated. Use TensorArraySplitV3"
-}
-op {
- graph_op_name: "TensorArraySplitV3"
- endpoint {
- name: "TensorArraySplitV3"
- }
- summary: "Split the data from the input value into TensorArray elements."
- description: <<END
-Assuming that `lengths` takes on values
-
- ```(n0, n1, ..., n(T-1))```
-
-and that `value` has shape
-
- ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```,
-
-this splits values into a TensorArray with T tensors.
-
-TensorArray index t will be the subtensor of values with starting position
-
- ```(n0 + n1 + ... + n(t-1), 0, 0, ...)```
-
-and having size
-
- ```nt x d0 x d1 x ...```
-END
-}
-op {
- graph_op_name: "TensorArrayUnpack"
- endpoint {
- name: "TensorArrayUnpack"
- }
-}
-op {
- graph_op_name: "TensorArrayV2"
- endpoint {
- name: "TensorArrayV2"
- }
- summary: "Deprecated. Use TensorArrayV3"
-}
-op {
- graph_op_name: "TensorArrayV3"
- endpoint {
- name: "TensorArrayV3"
- }
- summary: "An array of Tensors of given size."
- description: <<END
-Write data via Write and read via Read or Pack.
-END
-}
-op {
- graph_op_name: "TensorArrayWrite"
- endpoint {
- name: "TensorArrayWrite"
- }
-}
-op {
- graph_op_name: "TensorArrayWriteV2"
- endpoint {
- name: "TensorArrayWriteV2"
- }
- summary: "Deprecated. Use TensorArrayGradV3"
-}
-op {
- graph_op_name: "TensorArrayWriteV3"
- endpoint {
- name: "TensorArrayWriteV3"
- }
- summary: "Push an element onto the tensor_array."
-}
-op {
- graph_op_name: "TensorDataset"
- endpoint {
- name: "TensorDataset"
- }
- summary: "Creates a dataset that emits `components` as a tuple of tensors once."
-}
-op {
- graph_op_name: "TensorSliceDataset"
- endpoint {
- name: "TensorSliceDataset"
- }
- summary: "Creates a dataset that emits each dim-0 slice of `components` once."
-}
-op {
- graph_op_name: "TensorSummary"
- endpoint {
- name: "TensorSummary"
- }
- summary: "Outputs a `Summary` protocol buffer with a tensor."
- description: <<END
-This op is being phased out in favor of TensorSummaryV2, which lets callers pass
-a tag as well as a serialized SummaryMetadata proto string that contains
-plugin-specific data. We will keep this op to maintain backwards compatibility.
-END
-}
-op {
- graph_op_name: "TensorSummaryV2"
- endpoint {
- name: "TensorSummaryV2"
- }
- summary: "Outputs a `Summary` protocol buffer with a tensor and per-plugin data."
-}
-op {
- graph_op_name: "TextLineDataset"
- endpoint {
- name: "TextLineDataset"
- }
- summary: "Creates a dataset that emits the lines of one or more text files."
-}
-op {
- graph_op_name: "TextLineReader"
- endpoint {
- name: "TextLineReader"
- }
- summary: "A Reader that outputs the lines of a file delimited by \'\\n\'."
-}
-op {
- graph_op_name: "TextLineReaderV2"
- endpoint {
- name: "TextLineReaderV2"
- }
- summary: "A Reader that outputs the lines of a file delimited by \'\\n\'."
-}
-op {
- graph_op_name: "ThreadUnsafeUnigramCandidateSampler"
- endpoint {
- name: "ThreadUnsafeUnigramCandidateSampler"
- }
- summary: "Generates labels for candidate sampling with a learned unigram distribution."
- description: <<END
-See explanations of candidate sampling and the data formats at
-go/candidate-sampling.
-
-For each batch, this op picks a single set of sampled candidate labels.
-
-The advantages of sampling candidates per-batch are simplicity and the
-possibility of efficient dense matrix multiplication. The disadvantage is that
-the sampled candidates must be chosen independently of the context and of the
-true labels.
-END
-}
-op {
- graph_op_name: "Tile"
- endpoint {
- name: "Tile"
- }
- summary: "Constructs a tensor by tiling a given tensor."
- description: <<END
-This operation creates a new tensor by replicating `input` `multiples` times.
-The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements,
-and the values of `input` are replicated `multiples[i]` times along the 'i'th
-dimension. For example, tiling `[a b c d]` by `[2]` produces
-`[a b c d a b c d]`.
-END
-}
-op {
- graph_op_name: "TileGrad"
- endpoint {
- name: "TileGrad"
- }
- summary: "Returns the gradient of `Tile`."
- description: <<END
-Since `Tile` takes an input and repeats the input `multiples` times
-along each dimension, `TileGrad` takes in `multiples` and aggregates
-each repeated tile of `input` into `output`.
-END
-}
-op {
- graph_op_name: "TopK"
- endpoint {
- name: "TopK"
- }
- summary: "Finds values and indices of the `k` largest elements for the last dimension."
- description: <<END
-If the input is a vector (rank-1), finds the `k` largest entries in the vector
-and outputs their values and indices as vectors. Thus `values[j]` is the
-`j`-th largest entry in `input`, and its index is `indices[j]`.
-
-For matrices (resp. higher rank input), computes the top `k` entries in each
-row (resp. vector along the last dimension). Thus,
-
- values.shape = indices.shape = input.shape[:-1] + [k]
-
-If two elements are equal, the lower-index element appears first.
-
-If `k` varies dynamically, use `TopKV2` below.
-END
-}
-op {
- graph_op_name: "TopKV2"
- endpoint {
- name: "TopKV2"
- }
- summary: "Finds values and indices of the `k` largest elements for the last dimension."
- description: <<END
-If the input is a vector (rank-1), finds the `k` largest entries in the vector
-and outputs their values and indices as vectors. Thus `values[j]` is the
-`j`-th largest entry in `input`, and its index is `indices[j]`.
-
-For matrices (resp. higher rank input), computes the top `k` entries in each
-row (resp. vector along the last dimension). Thus,
-
- values.shape = indices.shape = input.shape[:-1] + [k]
-
-If two elements are equal, the lower-index element appears first.
-END
-}
-op {
- graph_op_name: "Transpose"
- endpoint {
- name: "Transpose"
- }
- summary: "Shuffle dimensions of x according to a permutation."
- description: <<END
-The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
- `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
-END
-}
-op {
- graph_op_name: "TruncateDiv"
- endpoint {
- name: "TruncateDiv"
- }
- summary: "Returns x / y element-wise for integer types."
- description: <<END
-Truncation designates that negative numbers will round fractional quantities
-toward zero. I.e. -7 / 5 = 1. This matches C semantics but it is different
-than Python semantics. See `FloorDiv` for a division function that matches
-Python Semantics.
-
-*NOTE*: `TruncateDiv` supports broadcasting. More about broadcasting
-[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-END
-}
-op {
- graph_op_name: "TruncateMod"
- endpoint {
- name: "TruncateMod"
- }
- summary: "Returns element-wise remainder of division. This emulates C semantics in that"
- description: <<END
-the result here is consistent with a truncating divide. E.g. `truncate(x / y) *
-y + truncate_mod(x, y) = x`.
-
-*NOTE*: `TruncateMod` supports broadcasting. More about broadcasting
-[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-END
-}
-op {
- graph_op_name: "TruncatedNormal"
- endpoint {
- name: "TruncatedNormal"
- }
- summary: "Outputs random values from a truncated normal distribution."
- description: <<END
-The generated values follow a normal distribution with mean 0 and standard
-deviation 1, except that values whose magnitude is more than 2 standard
-deviations from the mean are dropped and re-picked.
-END
-}
diff --git a/tensorflow/core/api_def/base_api/api_def_TFRecordDataset.pbtxt b/tensorflow/core/api_def/base_api/api_def_TFRecordDataset.pbtxt
new file mode 100644
index 0000000000..80f64cebb1
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TFRecordDataset.pbtxt
@@ -0,0 +1,25 @@
+op {
+ graph_op_name: "TFRecordDataset"
+ in_arg {
+ name: "filenames"
+ description: <<END
+A scalar or vector containing the name(s) of the file(s) to be
+read.
+END
+ }
+ in_arg {
+ name: "compression_type"
+ description: <<END
+A scalar containing either (i) the empty string (no
+compression), (ii) "ZLIB", or (iii) "GZIP".
+END
+ }
+ in_arg {
+ name: "buffer_size"
+ description: <<END
+A scalar representing the number of bytes to buffer. A value of
+0 means no buffering will be performed.
+END
+ }
+ summary: "Creates a dataset that emits the records from one or more TFRecord files."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TFRecordReader.pbtxt b/tensorflow/core/api_def/base_api/api_def_TFRecordReader.pbtxt
new file mode 100644
index 0000000000..100e346753
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TFRecordReader.pbtxt
@@ -0,0 +1,25 @@
+op {
+ graph_op_name: "TFRecordReader"
+ visibility: SKIP
+ out_arg {
+ name: "reader_handle"
+ description: <<END
+The handle to reference the Reader.
+END
+ }
+ attr {
+ name: "container"
+ description: <<END
+If non-empty, this reader is placed in the given container.
+Otherwise, a default container is used.
+END
+ }
+ attr {
+ name: "shared_name"
+ description: <<END
+If non-empty, this reader is named in the given bucket
+with this shared_name. Otherwise, the node name is used instead.
+END
+ }
+ summary: "A Reader that outputs the records from a TensorFlow Records file."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TFRecordReaderV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_TFRecordReaderV2.pbtxt
new file mode 100644
index 0000000000..f12ebe54ef
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TFRecordReaderV2.pbtxt
@@ -0,0 +1,27 @@
+op {
+ graph_op_name: "TFRecordReaderV2"
+ endpoint {
+ name: "TFRecordReader"
+ }
+ out_arg {
+ name: "reader_handle"
+ description: <<END
+The handle to reference the Reader.
+END
+ }
+ attr {
+ name: "container"
+ description: <<END
+If non-empty, this reader is placed in the given container.
+Otherwise, a default container is used.
+END
+ }
+ attr {
+ name: "shared_name"
+ description: <<END
+If non-empty, this reader is named in the given bucket
+with this shared_name. Otherwise, the node name is used instead.
+END
+ }
+ summary: "A Reader that outputs the records from a TensorFlow Records file."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TakeDataset.pbtxt b/tensorflow/core/api_def/base_api/api_def_TakeDataset.pbtxt
new file mode 100644
index 0000000000..8808dc6b1f
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TakeDataset.pbtxt
@@ -0,0 +1,12 @@
+op {
+ graph_op_name: "TakeDataset"
+ in_arg {
+ name: "count"
+ description: <<END
+A scalar representing the number of elements from the `input_dataset`
+that should be taken. A value of `-1` indicates that all of `input_dataset`
+is taken.
+END
+ }
+ summary: "Creates a dataset that contains `count` elements from the `input_dataset`."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TakeManySparseFromTensorsMap.pbtxt b/tensorflow/core/api_def/base_api/api_def_TakeManySparseFromTensorsMap.pbtxt
new file mode 100644
index 0000000000..2073d72451
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TakeManySparseFromTensorsMap.pbtxt
@@ -0,0 +1,100 @@
+op {
+ graph_op_name: "TakeManySparseFromTensorsMap"
+ in_arg {
+ name: "sparse_handles"
+ description: <<END
+1-D, The `N` serialized `SparseTensor` objects.
+Shape: `[N]`.
+END
+ }
+ out_arg {
+ name: "sparse_indices"
+ description: <<END
+2-D. The `indices` of the minibatch `SparseTensor`.
+END
+ }
+ out_arg {
+ name: "sparse_values"
+ description: <<END
+1-D. The `values` of the minibatch `SparseTensor`.
+END
+ }
+ out_arg {
+ name: "sparse_shape"
+ description: <<END
+1-D. The `shape` of the minibatch `SparseTensor`.
+END
+ }
+ attr {
+ name: "dtype"
+ description: <<END
+The `dtype` of the `SparseTensor` objects stored in the
+`SparseTensorsMap`.
+END
+ }
+ attr {
+ name: "container"
+ description: <<END
+The container name for the `SparseTensorsMap` read by this op.
+END
+ }
+ attr {
+ name: "shared_name"
+ description: <<END
+The shared name for the `SparseTensorsMap` read by this op.
+It should not be blank; rather the `shared_name` or unique Operation name
+of the Op that created the original `SparseTensorsMap` should be used.
+END
+ }
+ summary: "Read `SparseTensors` from a `SparseTensorsMap` and concatenate them."
+ description: <<END
+The input `sparse_handles` must be an `int64` matrix of shape `[N, 1]` where
+`N` is the minibatch size and the rows correspond to the output handles of
+`AddSparseToTensorsMap` or `AddManySparseToTensorsMap`. The ranks of the
+original `SparseTensor` objects that went into the given input ops must all
+match. When the final `SparseTensor` is created, it has rank one
+higher than the ranks of the incoming `SparseTensor` objects
+(they have been concatenated along a new row dimension on the left).
+
+The output `SparseTensor` object's shape values for all dimensions but the
+first are the max across the input `SparseTensor` objects' shape values
+for the corresponding dimensions. Its first shape value is `N`, the minibatch
+size.
+
+The input `SparseTensor` objects' indices are assumed ordered in
+standard lexicographic order. If this is not the case, after this
+step run `SparseReorder` to restore index ordering.
+
+For example, if the handles represent two original `SparseTensor`
+objects:
+
+```
+ index = [ 0]
+ [10]
+ [20]
+ values = [1, 2, 3]
+ shape = [50]
+```
+
+and
+
+```
+ index = [ 2]
+ [10]
+ values = [4, 5]
+ shape = [30]
+```
+
+then the final `SparseTensor` will be:
+
+```
+ index = [0 0]
+ [0 10]
+ [0 20]
+ [1 2]
+ [1 10]
+ values = [1, 2, 3, 4, 5]
+ shape = [2 50]
+```
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Tan.pbtxt b/tensorflow/core/api_def/base_api/api_def_Tan.pbtxt
new file mode 100644
index 0000000000..20f3e4eab3
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Tan.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Tan"
+ summary: "Computes tan of x element-wise."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Tanh.pbtxt b/tensorflow/core/api_def/base_api/api_def_Tanh.pbtxt
new file mode 100644
index 0000000000..3658ee641a
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Tanh.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Tanh"
+ summary: "Computes hyperbolic tangent of `x` element-wise."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TanhGrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_TanhGrad.pbtxt
new file mode 100644
index 0000000000..ef71385a2d
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TanhGrad.pbtxt
@@ -0,0 +1,9 @@
+op {
+ graph_op_name: "TanhGrad"
+ visibility: HIDDEN
+ summary: "Computes the gradient for the tanh of `x` wrt its input."
+ description: <<END
+Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and `dy`
+is the corresponding input gradient.
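+
+For example, a quick numeric check of the formula (illustrative only,
+using NumPy rather than this op):
+
+```
+import numpy as np
+
+x, dy = 0.5, 1.0
+y = np.tanh(x)           # y ~= 0.4621
+grad = dy * (1 - y * y)  # grad ~= 0.7864, the derivative of tanh at x
+```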
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TemporaryVariable.pbtxt b/tensorflow/core/api_def/base_api/api_def_TemporaryVariable.pbtxt
new file mode 100644
index 0000000000..3a41f69aa2
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TemporaryVariable.pbtxt
@@ -0,0 +1,45 @@
+op {
+ graph_op_name: "TemporaryVariable"
+ out_arg {
+ name: "ref"
+ description: <<END
+A reference to the variable tensor.
+END
+ }
+ attr {
+ name: "shape"
+ description: <<END
+The shape of the variable tensor.
+END
+ }
+ attr {
+ name: "dtype"
+ description: <<END
+The type of elements in the variable tensor.
+END
+ }
+ attr {
+ name: "var_name"
+ description: <<END
+Overrides the name used for the temporary variable resource. Default
+value is the name of the 'TemporaryVariable' op (which is guaranteed unique).
+END
+ }
+ summary: "Returns a tensor that may be mutated, but only persists within a single step."
+ description: <<END
+This is an experimental op for internal use only and it is possible to use this
+op in unsafe ways. DO NOT USE unless you fully understand the risks.
+
+It is the caller's responsibility to ensure that 'ref' is eventually passed to a
+matching 'DestroyTemporaryVariable' op after all other uses have completed.
+
+Outputs a ref to the tensor state so it may be read or modified.
+
+ E.g.
+     # Allocate a temporary [1, 2] float variable and record its
+     # auto-generated name so it can be destroyed later.
+     var = state_ops._temporary_variable([1, 2], dtypes.float32)
+     var_name = var.op.name
+     # Mutate the variable in place over the course of the step.
+     var = state_ops.assign(var, [[4.0, 5.0]])
+     var = state_ops.assign_add(var, [[6.0, 7.0]])
+     # Destroy the variable, yielding its final value as a regular tensor.
+     final = state_ops._destroy_temporary_variable(var, var_name=var_name)
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TensorArray.pbtxt b/tensorflow/core/api_def/base_api/api_def_TensorArray.pbtxt
new file mode 100644
index 0000000000..7eaa468130
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TensorArray.pbtxt
@@ -0,0 +1,3 @@
+op {
+ graph_op_name: "TensorArray"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TensorArrayClose.pbtxt b/tensorflow/core/api_def/base_api/api_def_TensorArrayClose.pbtxt
new file mode 100644
index 0000000000..e866250d3a
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TensorArrayClose.pbtxt
@@ -0,0 +1,3 @@
+op {
+ graph_op_name: "TensorArrayClose"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TensorArrayCloseV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_TensorArrayCloseV2.pbtxt
new file mode 100644
index 0000000000..ec784c94fb
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TensorArrayCloseV2.pbtxt
@@ -0,0 +1,5 @@
+op {
+ graph_op_name: "TensorArrayCloseV2"
+ visibility: SKIP
+ summary: "Deprecated. Use TensorArrayCloseV3"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TensorArrayCloseV3.pbtxt b/tensorflow/core/api_def/base_api/api_def_TensorArrayCloseV3.pbtxt
new file mode 100644
index 0000000000..4e469e4c07
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TensorArrayCloseV3.pbtxt
@@ -0,0 +1,17 @@
+op {
+ graph_op_name: "TensorArrayCloseV3"
+ endpoint {
+ name: "TensorArrayClose"
+ }
+ in_arg {
+ name: "handle"
+ description: <<END
+The handle to a TensorArray (output of TensorArray or TensorArrayGrad).
+END
+ }
+ summary: "Delete the TensorArray from its resource container."
+ description: <<END
+This enables the user to close and release the resource in the middle
+of a step/run.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TensorArrayConcat.pbtxt b/tensorflow/core/api_def/base_api/api_def_TensorArrayConcat.pbtxt
new file mode 100644
index 0000000000..e72b58de1c
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TensorArrayConcat.pbtxt
@@ -0,0 +1,3 @@
+op {
+ graph_op_name: "TensorArrayConcat"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TensorArrayConcatV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_TensorArrayConcatV2.pbtxt
new file mode 100644
index 0000000000..289b1ba387
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TensorArrayConcatV2.pbtxt
@@ -0,0 +1,5 @@
+op {
+ graph_op_name: "TensorArrayConcatV2"
+ visibility: SKIP
+ summary: "Deprecated. Use TensorArrayConcatV3"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TensorArrayConcatV3.pbtxt b/tensorflow/core/api_def/base_api/api_def_TensorArrayConcatV3.pbtxt
new file mode 100644
index 0000000000..502323b277
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TensorArrayConcatV3.pbtxt
@@ -0,0 +1,62 @@
+op {
+ graph_op_name: "TensorArrayConcatV3"
+ endpoint {
+ name: "TensorArrayConcat"
+ }
+ in_arg {
+ name: "handle"
+ description: <<END
+The handle to a TensorArray.
+END
+ }
+ in_arg {
+ name: "flow_in"
+ description: <<END
+A float scalar that enforces proper chaining of operations.
+END
+ }
+ out_arg {
+ name: "value"
+ description: <<END
+All of the elements in the TensorArray, concatenated along the first
+axis.
+END
+ }
+ out_arg {
+ name: "lengths"
+ description: <<END
+A vector of the row sizes of the original T elements in the
+value output. In the example above, this would be the values:
+`(n0, n1, ..., n(T-1))`.
+END
+ }
+ attr {
+ name: "dtype"
+ description: <<END
+The type of the element that is returned.
+END
+ }
+ attr {
+ name: "element_shape_except0"
+ description: <<END
+The expected shape of an element, if known,
+excluding the first dimension. Used to validate the shapes of
+TensorArray elements. If this shape is not fully specified, concatenating
+zero-size TensorArrays is an error.
+END
+ }
+ summary: "Concat the elements from the TensorArray into value `value`."
+ description: <<END
+Takes `T` elements of shapes
+
+ ```
+ (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...)
+ ```
+
+and concatenates them into a Tensor of shape:
+
+  ```((n0 + n1 + ... + n(T-1)) x d0 x d1 x ...)```
+
+All elements must have the same shape (excepting the first dimension).
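+
+For example (illustrative values): if the TensorArray holds T = 2
+elements of shapes `(2 x 3)` and `(4 x 3)`, then
+
+```
+value shape = [6, 3]   # concatenated along the first axis
+lengths     = [2, 4]   # row sizes of the original elements
+```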
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TensorArrayGather.pbtxt b/tensorflow/core/api_def/base_api/api_def_TensorArrayGather.pbtxt
new file mode 100644
index 0000000000..d4d179874f
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TensorArrayGather.pbtxt
@@ -0,0 +1,3 @@
+op {
+ graph_op_name: "TensorArrayGather"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TensorArrayGatherV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_TensorArrayGatherV2.pbtxt
new file mode 100644
index 0000000000..df17802026
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TensorArrayGatherV2.pbtxt
@@ -0,0 +1,5 @@
+op {
+ graph_op_name: "TensorArrayGatherV2"
+ visibility: SKIP
+ summary: "Deprecated. Use TensorArrayGatherV3"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TensorArrayGatherV3.pbtxt b/tensorflow/core/api_def/base_api/api_def_TensorArrayGatherV3.pbtxt
new file mode 100644
index 0000000000..44b4cd8143
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TensorArrayGatherV3.pbtxt
@@ -0,0 +1,49 @@
+op {
+ graph_op_name: "TensorArrayGatherV3"
+ endpoint {
+ name: "TensorArrayGather"
+ }
+ in_arg {
+ name: "handle"
+ description: <<END
+The handle to a TensorArray.
+END
+ }
+ in_arg {
+ name: "indices"
+ description: <<END
+The locations in the TensorArray from which to read tensor elements.
+END
+ }
+ in_arg {
+ name: "flow_in"
+ description: <<END
+A float scalar that enforces proper chaining of operations.
+END
+ }
+ out_arg {
+ name: "value"
+ description: <<END
+All of the elements in the TensorArray, concatenated along a new
+axis (the new dimension 0).
+END
+ }
+ attr {
+ name: "dtype"
+ description: <<END
+The type of the element that is returned.
+END
+ }
+ attr {
+ name: "element_shape"
+ description: <<END
+The expected shape of an element, if known. Used to
+validate the shapes of TensorArray elements. If this shape is not
+fully specified, gathering zero-size TensorArrays is an error.
+END
+ }
+ summary: "Gather specific elements from the TensorArray into output `value`."
+ description: <<END
+All elements selected by `indices` must have the same shape.
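+
+For example (illustrative values): with elements of shape `[5]` and
+`indices = [2, 0]`, `value` has shape `[2, 5]`: element 2 followed by
+element 0 along the new axis 0.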
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TensorArrayGrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_TensorArrayGrad.pbtxt
new file mode 100644
index 0000000000..517461edba
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TensorArrayGrad.pbtxt
@@ -0,0 +1,3 @@
+op {
+ graph_op_name: "TensorArrayGrad"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TensorArrayGradV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_TensorArrayGradV2.pbtxt
new file mode 100644
index 0000000000..846aa705db
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TensorArrayGradV2.pbtxt
@@ -0,0 +1,5 @@
+op {
+ graph_op_name: "TensorArrayGradV2"
+ visibility: SKIP
+ summary: "Deprecated. Use TensorArrayGradV3"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TensorArrayGradV3.pbtxt b/tensorflow/core/api_def/base_api/api_def_TensorArrayGradV3.pbtxt
new file mode 100644
index 0000000000..60634a0c8e
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TensorArrayGradV3.pbtxt
@@ -0,0 +1,64 @@
+op {
+ graph_op_name: "TensorArrayGradV3"
+ endpoint {
+ name: "TensorArrayGrad"
+ }
+ in_arg {
+ name: "handle"
+ description: <<END
+The handle to the forward TensorArray.
+END
+ }
+ in_arg {
+ name: "flow_in"
+ description: <<END
+A float scalar that enforces proper chaining of operations.
+END
+ }
+ attr {
+ name: "source"
+ description: <<END
+The gradient source string, used to decide which gradient TensorArray
+to return.
+END
+ }
+ summary: "Creates a TensorArray for storing the gradients of values in the given handle."
+ description: <<END
+If the given TensorArray gradient already exists, returns a reference to it.
+
+Locks the size of the original TensorArray by disabling its dynamic size flag.
+
+**A note about the input flow_in:**
+
+The handle flow_in forces the execution of the gradient lookup to occur
+only after certain other operations have occurred. For example, when
+the forward TensorArray is dynamically sized, writes to this TensorArray
+may resize the object. The gradient TensorArray is statically sized based
+on the size of the forward TensorArray when this operation executes.
+Furthermore, the size of the forward TensorArray is frozen by this call.
+As a result, the flow is used to ensure that the call to generate the gradient
+TensorArray only happens after all writes are executed.
+
+In the case of dynamically sized TensorArrays, gradient computation should
+only be performed on read operations that have themselves been chained via
+flow to occur only after all writes have executed. That way the final size
+of the forward TensorArray is known when this operation is called.
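+
+As an illustrative sketch of this chaining (pseudocode over the raw ops;
+the call syntax is assumed, argument names follow this file):
+
+```
+flow_1 = TensorArrayWriteV3(handle, 0, value_a, flow_in=flow_0)
+flow_2 = TensorArrayWriteV3(handle, 1, value_b, flow_in=flow_1)
+# flow_2 only becomes available after both writes, so the gradient
+# TensorArray is created once the forward array's final size is known:
+grad_handle, grad_flow = TensorArrayGradV3(handle, flow_in=flow_2,
+                                           source="gradients")
+```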
+
+**A note about the source attribute:**
+
+TensorArray gradient calls use an accumulator TensorArray object. If
+multiple gradients are calculated and run in the same session, the multiple
+gradient nodes may accidentally flow through the same accumulator TensorArray.
+This double counts and generally breaks the TensorArray gradient flow.
+
+The solution is to identify which gradient call this particular
+TensorArray gradient is being called in. This is performed by identifying
+a unique string (e.g. "gradients", "gradients_1", ...) from the input
+gradient Tensor's name. This string is used as a suffix when creating
+the TensorArray gradient object here (the attribute `source`).
+
+The attribute `source` is added as a suffix to the forward TensorArray's
+name when performing the creation / lookup, so that each separate gradient
+calculation gets its own TensorArray accumulator.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TensorArrayPack.pbtxt b/tensorflow/core/api_def/base_api/api_def_TensorArrayPack.pbtxt
new file mode 100644
index 0000000000..030950b06f
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TensorArrayPack.pbtxt
@@ -0,0 +1,3 @@
+op {
+ graph_op_name: "TensorArrayPack"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TensorArrayRead.pbtxt b/tensorflow/core/api_def/base_api/api_def_TensorArrayRead.pbtxt
new file mode 100644
index 0000000000..1b62f7fac7
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TensorArrayRead.pbtxt
@@ -0,0 +1,3 @@
+op {
+ graph_op_name: "TensorArrayRead"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TensorArrayReadV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_TensorArrayReadV2.pbtxt
new file mode 100644
index 0000000000..934d7e432a
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TensorArrayReadV2.pbtxt
@@ -0,0 +1,5 @@
+op {
+ graph_op_name: "TensorArrayReadV2"
+ visibility: SKIP
+ summary: "Deprecated. Use TensorArrayReadV3"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TensorArrayReadV3.pbtxt b/tensorflow/core/api_def/base_api/api_def_TensorArrayReadV3.pbtxt
new file mode 100644
index 0000000000..4f07182f2b
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TensorArrayReadV3.pbtxt
@@ -0,0 +1,31 @@
+op {
+ graph_op_name: "TensorArrayReadV3"
+ endpoint {
+ name: "TensorArrayRead"
+ }
+ in_arg {
+ name: "handle"
+ description: <<END
+The handle to a TensorArray.
+END
+ }
+ in_arg {
+ name: "flow_in"
+ description: <<END
+A float scalar that enforces proper chaining of operations.
+END
+ }
+ out_arg {
+ name: "value"
+ description: <<END
+The tensor that is read from the TensorArray.
+END
+ }
+ attr {
+ name: "dtype"
+ description: <<END
+The type of the element that is returned.
+END
+ }
+ summary: "Read an element from the TensorArray into output `value`."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TensorArrayScatter.pbtxt b/tensorflow/core/api_def/base_api/api_def_TensorArrayScatter.pbtxt
new file mode 100644
index 0000000000..a3e8d1625e
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TensorArrayScatter.pbtxt
@@ -0,0 +1,3 @@
+op {
+ graph_op_name: "TensorArrayScatter"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TensorArrayScatterV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_TensorArrayScatterV2.pbtxt
new file mode 100644
index 0000000000..aa74b6af6a
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TensorArrayScatterV2.pbtxt
@@ -0,0 +1,5 @@
+op {
+ graph_op_name: "TensorArrayScatterV2"
+ visibility: SKIP
+ summary: "Deprecated. Use TensorArrayScatterV3"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TensorArrayScatterV3.pbtxt b/tensorflow/core/api_def/base_api/api_def_TensorArrayScatterV3.pbtxt
new file mode 100644
index 0000000000..69539e8259
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TensorArrayScatterV3.pbtxt
@@ -0,0 +1,40 @@
+op {
+ graph_op_name: "TensorArrayScatterV3"
+ endpoint {
+ name: "TensorArrayScatter"
+ }
+ in_arg {
+ name: "handle"
+ description: <<END
+The handle to a TensorArray.
+END
+ }
+ in_arg {
+ name: "indices"
+ description: <<END
+The locations at which to write the tensor elements.
+END
+ }
+ in_arg {
+ name: "value"
+ description: <<END
+The concatenated tensor to write to the TensorArray.
+END
+ }
+ in_arg {
+ name: "flow_in"
+ description: <<END
+A float scalar that enforces proper chaining of operations.
+END
+ }
+ out_arg {
+ name: "flow_out"
+ description: <<END
+A float scalar that enforces proper chaining of operations.
+END
+ }
+ summary: "Scatter the data from the input value into specific TensorArray elements."
+ description: <<END
+`indices` must be a vector, its length must match the first dim of `value`.
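+
+For example (illustrative values):
+
+```
+value shape = [3, 5]     # three rows, each of shape [5]
+indices     = [0, 2, 7]  # length 3 == first dim of value
+# row i of value is written to TensorArray position indices[i]
+```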
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TensorArraySize.pbtxt b/tensorflow/core/api_def/base_api/api_def_TensorArraySize.pbtxt
new file mode 100644
index 0000000000..fb3a6fae1c
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TensorArraySize.pbtxt
@@ -0,0 +1,3 @@
+op {
+ graph_op_name: "TensorArraySize"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TensorArraySizeV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_TensorArraySizeV2.pbtxt
new file mode 100644
index 0000000000..b9c7483236
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TensorArraySizeV2.pbtxt
@@ -0,0 +1,5 @@
+op {
+ graph_op_name: "TensorArraySizeV2"
+ visibility: SKIP
+ summary: "Deprecated. Use TensorArraySizeV3"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TensorArraySizeV3.pbtxt b/tensorflow/core/api_def/base_api/api_def_TensorArraySizeV3.pbtxt
new file mode 100644
index 0000000000..76a7c8804f
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TensorArraySizeV3.pbtxt
@@ -0,0 +1,25 @@
+op {
+ graph_op_name: "TensorArraySizeV3"
+ endpoint {
+ name: "TensorArraySize"
+ }
+ in_arg {
+ name: "handle"
+ description: <<END
+The handle to a TensorArray (output of TensorArray or TensorArrayGrad).
+END
+ }
+ in_arg {
+ name: "flow_in"
+ description: <<END
+A float scalar that enforces proper chaining of operations.
+END
+ }
+ out_arg {
+ name: "size"
+ description: <<END
+The current size of the TensorArray.
+END
+ }
+ summary: "Get the current size of the TensorArray."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TensorArraySplit.pbtxt b/tensorflow/core/api_def/base_api/api_def_TensorArraySplit.pbtxt
new file mode 100644
index 0000000000..3eb8d6c7ff
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TensorArraySplit.pbtxt
@@ -0,0 +1,3 @@
+op {
+ graph_op_name: "TensorArraySplit"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TensorArraySplitV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_TensorArraySplitV2.pbtxt
new file mode 100644
index 0000000000..15a0b18d04
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TensorArraySplitV2.pbtxt
@@ -0,0 +1,5 @@
+op {
+ graph_op_name: "TensorArraySplitV2"
+ visibility: SKIP
+ summary: "Deprecated. Use TensorArraySplitV3"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TensorArraySplitV3.pbtxt b/tensorflow/core/api_def/base_api/api_def_TensorArraySplitV3.pbtxt
new file mode 100644
index 0000000000..c2aeb4f660
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TensorArraySplitV3.pbtxt
@@ -0,0 +1,57 @@
+op {
+ graph_op_name: "TensorArraySplitV3"
+ endpoint {
+ name: "TensorArraySplit"
+ }
+ in_arg {
+ name: "handle"
+ description: <<END
+The handle to a TensorArray.
+END
+ }
+ in_arg {
+ name: "value"
+ description: <<END
+The concatenated tensor to write to the TensorArray.
+END
+ }
+ in_arg {
+ name: "lengths"
+ description: <<END
+The vector of lengths, how to split the rows of value into the
+TensorArray.
+END
+ }
+ in_arg {
+ name: "flow_in"
+ description: <<END
+A float scalar that enforces proper chaining of operations.
+END
+ }
+ out_arg {
+ name: "flow_out"
+ description: <<END
+A float scalar that enforces proper chaining of operations.
+END
+ }
+ summary: "Split the data from the input value into TensorArray elements."
+ description: <<END
+Assuming that `lengths` takes on values
+
+ ```(n0, n1, ..., n(T-1))```
+
+and that `value` has shape
+
+  ```((n0 + n1 + ... + n(T-1)) x d0 x d1 x ...)```,
+
+this splits values into a TensorArray with T tensors.
+
+TensorArray index t will be the subtensor of values with starting position
+
+ ```(n0 + n1 + ... + n(t-1), 0, 0, ...)```
+
+and having size
+
+ ```nt x d0 x d1 x ...```
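+
+For example (illustrative values): with `lengths = (2, 3)` and `value` of
+shape `(5 x 4)`, the TensorArray receives T = 2 tensors:
+
+```
+index 0: value[0:2, :]   # shape (2 x 4)
+index 1: value[2:5, :]   # shape (3 x 4)
+```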
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TensorArrayUnpack.pbtxt b/tensorflow/core/api_def/base_api/api_def_TensorArrayUnpack.pbtxt
new file mode 100644
index 0000000000..a9011de23e
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TensorArrayUnpack.pbtxt
@@ -0,0 +1,3 @@
+op {
+ graph_op_name: "TensorArrayUnpack"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TensorArrayV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_TensorArrayV2.pbtxt
new file mode 100644
index 0000000000..f4d58e7721
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TensorArrayV2.pbtxt
@@ -0,0 +1,5 @@
+op {
+ graph_op_name: "TensorArrayV2"
+ visibility: SKIP
+ summary: "Deprecated. Use TensorArrayV3"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TensorArrayV3.pbtxt b/tensorflow/core/api_def/base_api/api_def_TensorArrayV3.pbtxt
new file mode 100644
index 0000000000..d1de753ee5
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TensorArrayV3.pbtxt
@@ -0,0 +1,65 @@
+op {
+ graph_op_name: "TensorArrayV3"
+ endpoint {
+ name: "TensorArray"
+ }
+ in_arg {
+ name: "size"
+ description: <<END
+The size of the array.
+END
+ }
+ out_arg {
+ name: "handle"
+ description: <<END
+The handle to the TensorArray.
+END
+ }
+ out_arg {
+ name: "flow"
+ description: <<END
+A scalar used to control gradient flow.
+END
+ }
+ attr {
+ name: "dtype"
+ description: <<END
+The type of the elements in the tensor_array.
+END
+ }
+ attr {
+ name: "element_shape"
+ description: <<END
+The expected shape of an element, if known. Used to
+validate the shapes of TensorArray elements. If this shape is not
+fully specified, gathering zero-size TensorArrays is an error.
+END
+ }
+ attr {
+ name: "dynamic_size"
+ description: <<END
+A boolean that determines whether writes to the TensorArray
+are allowed to grow the size. By default, this is not allowed.
+END
+ }
+ attr {
+ name: "clear_after_read"
+ description: <<END
+If true (default), Tensors in the TensorArray are cleared
+after being read. This disables multiple read semantics but allows early
+release of memory.
+END
+ }
+ attr {
+ name: "tensor_array_name"
+ description: <<END
+Overrides the name used for the temporary tensor_array
+resource. Default value is the name of the 'TensorArray' op (which
+is guaranteed unique).
+END
+ }
+ summary: "An array of Tensors of given size."
+ description: <<END
+Write data via Write and read via Read or Pack.
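+
+An illustrative Python-level sketch (via the `tf.TensorArray` wrapper
+over these ops; the exact wrapper API is assumed and may vary by
+version):
+
+```
+ta = tf.TensorArray(dtype=tf.float32, size=2)
+ta = ta.write(0, [1.0, 2.0])   # TensorArrayWrite
+ta = ta.write(1, [3.0, 4.0])
+v = ta.read(0)                 # TensorArrayRead  -> [1.0, 2.0]
+s = ta.stack()                 # pack all elements -> [[1.0, 2.0], [3.0, 4.0]]
+```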
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TensorArrayWrite.pbtxt b/tensorflow/core/api_def/base_api/api_def_TensorArrayWrite.pbtxt
new file mode 100644
index 0000000000..92ab1764ec
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TensorArrayWrite.pbtxt
@@ -0,0 +1,3 @@
+op {
+ graph_op_name: "TensorArrayWrite"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TensorArrayWriteV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_TensorArrayWriteV2.pbtxt
new file mode 100644
index 0000000000..f7af8c3ab2
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TensorArrayWriteV2.pbtxt
@@ -0,0 +1,5 @@
+op {
+ graph_op_name: "TensorArrayWriteV2"
+ visibility: SKIP
+ summary: "Deprecated. Use TensorArrayGradV3"
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TensorArrayWriteV3.pbtxt b/tensorflow/core/api_def/base_api/api_def_TensorArrayWriteV3.pbtxt
new file mode 100644
index 0000000000..312b4b472d
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TensorArrayWriteV3.pbtxt
@@ -0,0 +1,37 @@
+op {
+ graph_op_name: "TensorArrayWriteV3"
+ endpoint {
+ name: "TensorArrayWrite"
+ }
+ in_arg {
+ name: "handle"
+ description: <<END
+The handle to a TensorArray.
+END
+ }
+ in_arg {
+ name: "index"
+ description: <<END
+The position to write to inside the TensorArray.
+END
+ }
+ in_arg {
+ name: "value"
+ description: <<END
+The tensor to write to the TensorArray.
+END
+ }
+ in_arg {
+ name: "flow_in"
+ description: <<END
+A float scalar that enforces proper chaining of operations.
+END
+ }
+ out_arg {
+ name: "flow_out"
+ description: <<END
+A float scalar that enforces proper chaining of operations.
+END
+ }
+ summary: "Push an element onto the tensor_array."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TensorDataset.pbtxt b/tensorflow/core/api_def/base_api/api_def_TensorDataset.pbtxt
new file mode 100644
index 0000000000..050e174aac
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TensorDataset.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TensorDataset"
+ summary: "Creates a dataset that emits `components` as a tuple of tensors once."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TensorSliceDataset.pbtxt b/tensorflow/core/api_def/base_api/api_def_TensorSliceDataset.pbtxt
new file mode 100644
index 0000000000..a26a98fd7f
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TensorSliceDataset.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TensorSliceDataset"
+ summary: "Creates a dataset that emits each dim-0 slice of `components` once."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TensorSummary.pbtxt b/tensorflow/core/api_def/base_api/api_def_TensorSummary.pbtxt
new file mode 100644
index 0000000000..7601e7e162
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TensorSummary.pbtxt
@@ -0,0 +1,33 @@
+op {
+ graph_op_name: "TensorSummary"
+ in_arg {
+ name: "tensor"
+ description: <<END
+A tensor to serialize.
+END
+ }
+ attr {
+ name: "description"
+ description: <<END
+A json-encoded SummaryDescription proto.
+END
+ }
+ attr {
+ name: "labels"
+ description: <<END
+An unused list of strings.
+END
+ }
+ attr {
+ name: "display_name"
+ description: <<END
+An unused string.
+END
+ }
+ summary: "Outputs a `Summary` protocol buffer with a tensor."
+ description: <<END
+This op is being phased out in favor of TensorSummaryV2, which lets callers pass
+a tag as well as a serialized SummaryMetadata proto string that contains
+plugin-specific data. We will keep this op to maintain backwards compatibility.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TensorSummaryV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_TensorSummaryV2.pbtxt
new file mode 100644
index 0000000000..6e03c5dc05
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TensorSummaryV2.pbtxt
@@ -0,0 +1,23 @@
+op {
+ graph_op_name: "TensorSummaryV2"
+ in_arg {
+ name: "tag"
+ description: <<END
+A string attached to this summary. Used for organization in TensorBoard.
+END
+ }
+ in_arg {
+ name: "tensor"
+ description: <<END
+A tensor to serialize.
+END
+ }
+ in_arg {
+ name: "serialized_summary_metadata"
+ description: <<END
+A serialized SummaryMetadata proto. Contains plugin
+data.
+END
+ }
+ summary: "Outputs a `Summary` protocol buffer with a tensor and per-plugin data."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TextLineDataset.pbtxt b/tensorflow/core/api_def/base_api/api_def_TextLineDataset.pbtxt
new file mode 100644
index 0000000000..6b63050996
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TextLineDataset.pbtxt
@@ -0,0 +1,24 @@
+op {
+ graph_op_name: "TextLineDataset"
+ in_arg {
+ name: "filenames"
+ description: <<END
+A scalar or a vector containing the name(s) of the file(s) to be
+read.
+END
+ }
+ in_arg {
+ name: "compression_type"
+ description: <<END
+A scalar containing either (i) the empty string (no
+compression), (ii) "ZLIB", or (iii) "GZIP".
+END
+ }
+ in_arg {
+ name: "buffer_size"
+ description: <<END
+A scalar containing the number of bytes to buffer.
+END
+ }
+ summary: "Creates a dataset that emits the lines of one or more text files."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TextLineReader.pbtxt b/tensorflow/core/api_def/base_api/api_def_TextLineReader.pbtxt
new file mode 100644
index 0000000000..74ed1da8ff
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TextLineReader.pbtxt
@@ -0,0 +1,31 @@
+op {
+ graph_op_name: "TextLineReader"
+ visibility: SKIP
+ out_arg {
+ name: "reader_handle"
+ description: <<END
+The handle to reference the Reader.
+END
+ }
+ attr {
+ name: "skip_header_lines"
+ description: <<END
+Number of lines to skip from the beginning of every file.
+END
+ }
+ attr {
+ name: "container"
+ description: <<END
+If non-empty, this reader is placed in the given container.
+Otherwise, a default container is used.
+END
+ }
+ attr {
+ name: "shared_name"
+ description: <<END
+If non-empty, this reader is named in the given bucket
+with this shared_name. Otherwise, the node name is used instead.
+END
+ }
+ summary: "A Reader that outputs the lines of a file delimited by \'\\n\'."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TextLineReaderV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_TextLineReaderV2.pbtxt
new file mode 100644
index 0000000000..0de7655b74
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TextLineReaderV2.pbtxt
@@ -0,0 +1,33 @@
+op {
+ graph_op_name: "TextLineReaderV2"
+ endpoint {
+ name: "TextLineReader"
+ }
+ out_arg {
+ name: "reader_handle"
+ description: <<END
+The handle to reference the Reader.
+END
+ }
+ attr {
+ name: "skip_header_lines"
+ description: <<END
+Number of lines to skip from the beginning of every file.
+END
+ }
+ attr {
+ name: "container"
+ description: <<END
+If non-empty, this reader is placed in the given container.
+Otherwise, a default container is used.
+END
+ }
+ attr {
+ name: "shared_name"
+ description: <<END
+If non-empty, this reader is named in the given bucket
+with this shared_name. Otherwise, the node name is used instead.
+END
+ }
+ summary: "A Reader that outputs the lines of a file delimited by \'\\n\'."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_ThreadUnsafeUnigramCandidateSampler.pbtxt b/tensorflow/core/api_def/base_api/api_def_ThreadUnsafeUnigramCandidateSampler.pbtxt
new file mode 100644
index 0000000000..2619aae806
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ThreadUnsafeUnigramCandidateSampler.pbtxt
@@ -0,0 +1,87 @@
+op {
+ graph_op_name: "ThreadUnsafeUnigramCandidateSampler"
+ visibility: SKIP
+ in_arg {
+ name: "true_classes"
+ description: <<END
+A batch_size * num_true matrix, in which each row contains the
+IDs of the num_true target_classes in the corresponding original label.
+END
+ }
+ out_arg {
+ name: "sampled_candidates"
+ description: <<END
+A vector of length num_sampled, in which each element is
+the ID of a sampled candidate.
+END
+ }
+ out_arg {
+ name: "true_expected_count"
+ description: <<END
+A batch_size * num_true matrix, representing
+the number of times each candidate is expected to occur in a batch
+of sampled candidates. If unique=true, then this is a probability.
+END
+ }
+ out_arg {
+ name: "sampled_expected_count"
+ description: <<END
+A vector of length num_sampled, for each sampled
+candidate representing the number of times the candidate is expected
+to occur in a batch of sampled candidates. If unique=true, then this is a
+probability.
+END
+ }
+ attr {
+ name: "num_true"
+ description: <<END
+Number of true labels per context.
+END
+ }
+ attr {
+ name: "num_sampled"
+ description: <<END
+Number of candidates to randomly sample.
+END
+ }
+ attr {
+ name: "unique"
+ description: <<END
+If unique is true, we sample with rejection, so that all sampled
+candidates in a batch are unique. This requires some approximation to
+estimate the post-rejection sampling probabilities.
+END
+ }
+ attr {
+ name: "range_max"
+ description: <<END
+The sampler will sample integers from the interval [0, range_max).
+END
+ }
+ attr {
+ name: "seed"
+ description: <<END
+If either seed or seed2 is set to be non-zero, the random number
+generator is seeded by the given seed. Otherwise, it is seeded by a
+random seed.
+END
+ }
+ attr {
+ name: "seed2"
+ description: <<END
+A second seed to avoid seed collision.
+END
+ }
+ summary: "Generates labels for candidate sampling with a learned unigram distribution."
+ description: <<END
+See explanations of candidate sampling and the data formats at
+go/candidate-sampling.
+
+For each batch, this op picks a single set of sampled candidate labels.
+
+The advantages of sampling candidates per-batch are simplicity and the
+possibility of efficient dense matrix multiplication. The disadvantage is that
+the sampled candidates must be chosen independently of the context and of the
+true labels.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Tile.pbtxt b/tensorflow/core/api_def/base_api/api_def_Tile.pbtxt
new file mode 100644
index 0000000000..97e1cae19c
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Tile.pbtxt
@@ -0,0 +1,23 @@
+op {
+ graph_op_name: "Tile"
+ in_arg {
+ name: "input"
+ description: <<END
+1-D or higher.
+END
+ }
+ in_arg {
+ name: "multiples"
+ description: <<END
+1-D. Length must be the same as the number of dimensions in `input`.
+END
+ }
+ summary: "Constructs a tensor by tiling a given tensor."
+ description: <<END
+This operation creates a new tensor by replicating `input` `multiples` times.
+The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements,
+and the values of `input` are replicated `multiples[i]` times along the 'i'th
+dimension. For example, tiling `[a b c d]` by `[2]` produces
+`[a b c d a b c d]`.
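+
+A 2-D example (illustrative values): tiling a `[2, 3]` tensor by `[1, 2]`
+yields a `[2, 6]` tensor:
+
+```
+# tensor 't' is [[1, 2, 3], [4, 5, 6]]
+tile(t, [1, 2]) ==> [[1, 2, 3, 1, 2, 3],
+                     [4, 5, 6, 4, 5, 6]]
+```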
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TileGrad.pbtxt b/tensorflow/core/api_def/base_api/api_def_TileGrad.pbtxt
new file mode 100644
index 0000000000..b211534259
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TileGrad.pbtxt
@@ -0,0 +1,9 @@
+op {
+ graph_op_name: "TileGrad"
+ summary: "Returns the gradient of `Tile`."
+ description: <<END
+Since `Tile` takes an input and repeats the input `multiples` times
+along each dimension, `TileGrad` takes in `multiples` and aggregates
+each repeated tile of `input` into `output`.
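+
+For example (illustrative values): if `input = [a, b, c, d]` was tiled by
+`[2]` in the forward pass, then
+
+```
+# incoming gradient for the tiled tensor:
+grad_in  = [g0, g1, g2, g3, g4, g5, g6, g7]
+# TileGrad sums the two tiles element-wise:
+grad_out = [g0 + g4, g1 + g5, g2 + g6, g3 + g7]
+```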
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TopK.pbtxt b/tensorflow/core/api_def/base_api/api_def_TopK.pbtxt
new file mode 100644
index 0000000000..c4060d0afa
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TopK.pbtxt
@@ -0,0 +1,50 @@
+op {
+ graph_op_name: "TopK"
+ in_arg {
+ name: "input"
+ description: <<END
+1-D or higher with last dimension at least `k`.
+END
+ }
+ out_arg {
+ name: "values"
+ description: <<END
+The `k` largest elements along each last dimensional slice.
+END
+ }
+ out_arg {
+ name: "indices"
+ description: <<END
+The indices of `values` within the last dimension of `input`.
+END
+ }
+ attr {
+ name: "k"
+ description: <<END
+Number of top elements to look for along the last dimension (along each
+row for matrices).
+END
+ }
+ attr {
+ name: "sorted"
+ description: <<END
+If true the resulting `k` elements will be sorted by the values in
+descending order.
+END
+ }
+ summary: "Finds values and indices of the `k` largest elements for the last dimension."
+ description: <<END
+If the input is a vector (rank-1), finds the `k` largest entries in the vector
+and outputs their values and indices as vectors. Thus `values[j]` is the
+`j`-th largest entry in `input`, and its index is `indices[j]`.
+
+For matrices (resp. higher rank input), computes the top `k` entries in each
+row (resp. vector along the last dimension). Thus,
+
+ values.shape = indices.shape = input.shape[:-1] + [k]
+
+If two elements are equal, the lower-index element appears first.
+
+If `k` varies dynamically, use `TopKV2` below.
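+
+For example (illustrative values):
+
+```
+# tensor 'input' is [3, 1, 4, 1, 5], k = 2
+values  ==> [5, 4]
+indices ==> [4, 2]
+```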
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TopKV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_TopKV2.pbtxt
new file mode 100644
index 0000000000..fd17df16a2
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TopKV2.pbtxt
@@ -0,0 +1,51 @@
+op {
+ graph_op_name: "TopKV2"
+ endpoint {
+ name: "TopK"
+ }
+ in_arg {
+ name: "input"
+ description: <<END
+1-D or higher with last dimension at least `k`.
+END
+ }
+ in_arg {
+ name: "k"
+ description: <<END
+0-D. Number of top elements to look for along the last dimension (along each
+row for matrices).
+END
+ }
+ out_arg {
+ name: "values"
+ description: <<END
+The `k` largest elements along each last dimensional slice.
+END
+ }
+ out_arg {
+ name: "indices"
+ description: <<END
+The indices of `values` within the last dimension of `input`.
+END
+ }
+ attr {
+ name: "sorted"
+ description: <<END
+If true the resulting `k` elements will be sorted by the values in
+descending order.
+END
+ }
+ summary: "Finds values and indices of the `k` largest elements for the last dimension."
+ description: <<END
+If the input is a vector (rank-1), finds the `k` largest entries in the vector
+and outputs their values and indices as vectors. Thus `values[j]` is the
+`j`-th largest entry in `input`, and its index is `indices[j]`.
+
+For matrices (resp. higher rank input), computes the top `k` entries in each
+row (resp. vector along the last dimension). Thus,
+
+ values.shape = indices.shape = input.shape[:-1] + [k]
+
+If two elements are equal, the lower-index element appears first.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Transpose.pbtxt b/tensorflow/core/api_def/base_api/api_def_Transpose.pbtxt
new file mode 100644
index 0000000000..0ec7fae659
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Transpose.pbtxt
@@ -0,0 +1,8 @@
+op {
+ graph_op_name: "Transpose"
+ summary: "Shuffle dimensions of x according to a permutation."
+ description: <<END
+The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
+ `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
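+
+For example (illustrative values): if `x` has shape `[2, 3, 4]` and
+`perm = [2, 0, 1]`, then `y` has shape `[4, 2, 3]`, since
+`y.shape[0] == x.shape[2]`, `y.shape[1] == x.shape[0]`, and
+`y.shape[2] == x.shape[1]`.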
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TruncateDiv.pbtxt b/tensorflow/core/api_def/base_api/api_def_TruncateDiv.pbtxt
new file mode 100644
index 0000000000..ef1b987313
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TruncateDiv.pbtxt
@@ -0,0 +1,13 @@
+op {
+ graph_op_name: "TruncateDiv"
+ summary: "Returns x / y element-wise for integer types."
+ description: <<END
+Truncation designates that negative numbers will round fractional
+quantities toward zero. I.e. -7 / 5 = -1. This matches C semantics but is
+different from Python semantics. See `FloorDiv` for a division function
+that matches Python semantics.
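+
+For example (illustrative values):
+
+```
+truncate_div(-7, 5) ==> -1   # -1.4 rounded toward zero (C semantics)
+floor_div(-7, 5)    ==> -2   # -1.4 rounded toward -inf (Python semantics)
+```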
+
+*NOTE*: `TruncateDiv` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TruncateMod.pbtxt b/tensorflow/core/api_def/base_api/api_def_TruncateMod.pbtxt
new file mode 100644
index 0000000000..804f70ab52
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TruncateMod.pbtxt
@@ -0,0 +1,11 @@
+op {
+ graph_op_name: "TruncateMod"
+ summary: "Returns element-wise remainder of division. This emulates C semantics in that"
+ description: <<END
+the result here is consistent with a truncating divide. E.g. `truncate(x / y) *
+y + truncate_mod(x, y) = x`.
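+
+For example (illustrative values):
+
+```
+x = -7, y = 5
+truncate(x / y)    ==> -1   # -1.4 rounded toward zero
+truncate_mod(x, y) ==> -2   # since (-1) * 5 + (-2) == -7
+```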
+
+*NOTE*: `TruncateMod` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_TruncatedNormal.pbtxt b/tensorflow/core/api_def/base_api/api_def_TruncatedNormal.pbtxt
new file mode 100644
index 0000000000..3da930d6f8
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_TruncatedNormal.pbtxt
@@ -0,0 +1,42 @@
+op {
+ graph_op_name: "TruncatedNormal"
+ in_arg {
+ name: "shape"
+ description: <<END
+The shape of the output tensor.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+A tensor of the specified shape filled with random truncated normal
+values.
+END
+ }
+ attr {
+ name: "seed"
+ description: <<END
+If either `seed` or `seed2` is set to be non-zero, the random number
+generator is seeded by the given seed. Otherwise, it is seeded by a
+random seed.
+END
+ }
+ attr {
+ name: "seed2"
+ description: <<END
+A second seed to avoid seed collision.
+END
+ }
+ attr {
+ name: "dtype"
+ description: <<END
+The type of the output.
+END
+ }
+ summary: "Outputs random values from a truncated normal distribution."
+ description: <<END
+The generated values follow a normal distribution with mean 0 and standard
+deviation 1, except that values whose magnitude is more than 2 standard
+deviations from the mean are dropped and re-picked.
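+
+Since the distribution here is standard normal, roughly 4.6% of raw draws
+fall outside two standard deviations and are re-drawn, so every returned
+value `v` satisfies `-2 < v < 2`.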
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_U.pbtxt b/tensorflow/core/api_def/base_api/api_def_U.pbtxt
deleted file mode 100644
index 6699efc0e0..0000000000
--- a/tensorflow/core/api_def/base_api/api_def_U.pbtxt
+++ /dev/null
@@ -1,150 +0,0 @@
-op {
- graph_op_name: "UniformCandidateSampler"
- endpoint {
- name: "UniformCandidateSampler"
- }
- summary: "Generates labels for candidate sampling with a uniform distribution."
- description: <<END
-See explanations of candidate sampling and the data formats at
-go/candidate-sampling.
-
-For each batch, this op picks a single set of sampled candidate labels.
-
-The advantages of sampling candidates per-batch are simplicity and the
-possibility of efficient dense matrix multiplication. The disadvantage is that
-the sampled candidates must be chosen independently of the context and of the
-true labels.
-END
-}
-op {
- graph_op_name: "Unique"
- endpoint {
- name: "Unique"
- }
- summary: "Finds unique elements in a 1-D tensor."
- description: <<END
-This operation returns a tensor `y` containing all of the unique elements of `x`
-sorted in the same order that they occur in `x`. This operation also returns a
-tensor `idx` the same size as `x` that contains the index of each value of `x`
-in the unique output `y`. In other words:
-
-`y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`
-
-For example:
-
-```
-# tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
-y, idx = unique(x)
-y ==> [1, 2, 4, 7, 8]
-idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
-```
-END
-}
-op {
- graph_op_name: "UniqueWithCounts"
- endpoint {
- name: "UniqueWithCounts"
- }
- summary: "Finds unique elements in a 1-D tensor."
- description: <<END
-This operation returns a tensor `y` containing all of the unique elements of `x`
-sorted in the same order that they occur in `x`. This operation also returns a
-tensor `idx` the same size as `x` that contains the index of each value of `x`
-in the unique output `y`. Finally, it returns a third tensor `count` that
-contains the count of each element of `y` in `x`. In other words:
-
-`y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`
-
-For example:
-
-```
-# tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
-y, idx, count = unique_with_counts(x)
-y ==> [1, 2, 4, 7, 8]
-idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
-count ==> [2, 1, 3, 1, 2]
-```
-END
-}
-op {
- graph_op_name: "Unpack"
- endpoint {
- name: "Unpack"
- }
- summary: "Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors."
- description: <<END
-Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.
-For example, given a tensor of shape `(A, B, C, D)`;
-
-If `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]`
- and each tensor in `output` will have shape `(B, C, D)`. (Note that the
- dimension unpacked along is gone, unlike `split`).
-
-If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]`
- and each tensor in `output` will have shape `(A, C, D)`.
-Etc.
-
-This is the opposite of `pack`.
-END
-}
-op {
- graph_op_name: "UnsortedSegmentMax"
- endpoint {
- name: "UnsortedSegmentMax"
- }
- summary: "Computes the Max along segments of a tensor."
- description: <<END
-Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
-segments.
-
-This operator is similar to the [unsorted segment sum operator](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
-Instead of computing the sum over segments, it computes the maximum
-such that:
-
-\\(output_i = \max_j data_j\\) where max is over `j` such
-that `segment_ids[j] == i`.
-
-If the maximum is empty for a given segment ID `i`, it outputs the smallest possible value for specific numeric type,
- `output[i] = numeric_limits<T>::min()`.
-
-<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
-<img style="width:100%" src="https://www.tensorflow.org/images/UnsortedSegmentMax.png" alt>
-</div>
-END
-}
-op {
- graph_op_name: "UnsortedSegmentSum"
- endpoint {
- name: "UnsortedSegmentSum"
- }
- summary: "Computes the sum along segments of a tensor."
- description: <<END
-Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
-segments.
-
-Computes a tensor such that
-`(output[i] = sum_{j...} data[j...]` where the sum is over tuples `j...` such
-that `segment_ids[j...] == i`. Unlike `SegmentSum`, `segment_ids`
-need not be sorted and need not cover all values in the full
-range of valid values.
-
-If the sum is empty for a given segment ID `i`, `output[i] = 0`.
-
-`num_segments` should equal the number of distinct segment IDs.
-
-<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
-<img style="width:100%" src="https://www.tensorflow.org/images/UnsortedSegmentSum.png" alt>
-</div>
-END
-}
-op {
- graph_op_name: "Unstage"
- endpoint {
- name: "Unstage"
- }
- summary: "Op is similar to a lightweight Dequeue."
- description: <<END
-The basic functionality is similar to dequeue with many fewer
-capabilities and options. This Op is optimized for performance.
-END
-}
diff --git a/tensorflow/core/api_def/base_api/api_def_UniformCandidateSampler.pbtxt b/tensorflow/core/api_def/base_api/api_def_UniformCandidateSampler.pbtxt
new file mode 100644
index 0000000000..4cf431a2e1
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_UniformCandidateSampler.pbtxt
@@ -0,0 +1,86 @@
+op {
+ graph_op_name: "UniformCandidateSampler"
+ in_arg {
+ name: "true_classes"
+ description: <<END
+A batch_size * num_true matrix, in which each row contains the
+IDs of the num_true target_classes in the corresponding original label.
+END
+ }
+ out_arg {
+ name: "sampled_candidates"
+ description: <<END
+A vector of length num_sampled, in which each element is
+the ID of a sampled candidate.
+END
+ }
+ out_arg {
+ name: "true_expected_count"
+ description: <<END
+A batch_size * num_true matrix, representing
+the number of times each candidate is expected to occur in a batch
+of sampled candidates. If unique=true, then this is a probability.
+END
+ }
+ out_arg {
+ name: "sampled_expected_count"
+ description: <<END
+A vector of length num_sampled, for each sampled
+candidate representing the number of times the candidate is expected
+to occur in a batch of sampled candidates. If unique=true, then this is a
+probability.
+END
+ }
+ attr {
+ name: "num_true"
+ description: <<END
+Number of true labels per context.
+END
+ }
+ attr {
+ name: "num_sampled"
+ description: <<END
+Number of candidates to randomly sample.
+END
+ }
+ attr {
+ name: "unique"
+ description: <<END
+If unique is true, we sample with rejection, so that all sampled
+candidates in a batch are unique. This requires some approximation to
+estimate the post-rejection sampling probabilities.
+END
+ }
+ attr {
+ name: "range_max"
+ description: <<END
+The sampler will sample integers from the interval [0, range_max).
+END
+ }
+ attr {
+ name: "seed"
+ description: <<END
+If either seed or seed2 are set to be non-zero, the random number
+generator is seeded by the given seed. Otherwise, it is seeded by a
+random seed.
+END
+ }
+ attr {
+ name: "seed2"
+ description: <<END
+A second seed to avoid seed collision.
+END
+ }
+ summary: "Generates labels for candidate sampling with a uniform distribution."
+ description: <<END
+See explanations of candidate sampling and the data formats at
+go/candidate-sampling.
+
+For each batch, this op picks a single set of sampled candidate labels.
+
+The advantages of sampling candidates per-batch are simplicity and the
+possibility of efficient dense matrix multiplication. The disadvantage is that
+the sampled candidates must be chosen independently of the context and of the
+true labels.
+END
+}
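A usage sketch, assuming the TF 1.x endpoint `tf.nn.uniform_candidate_sampler`; the `true_classes` batch here is hypothetical, and candidates are drawn uniformly from [0, range_max):

```
import tensorflow as tf

true_classes = tf.constant([[0], [3]], dtype=tf.int64)  # batch_size=2, num_true=1
sampled, true_exp, sampled_exp = tf.nn.uniform_candidate_sampler(
    true_classes=true_classes, num_true=1, num_sampled=4,
    unique=True, range_max=10, seed=7)
with tf.Session() as sess:
    # With unique=True the 4 sampled IDs are distinct and the expected
    # counts are (approximate) probabilities.
    print(sess.run([sampled, true_exp, sampled_exp]))
```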
diff --git a/tensorflow/core/api_def/base_api/api_def_Unique.pbtxt b/tensorflow/core/api_def/base_api/api_def_Unique.pbtxt
new file mode 100644
index 0000000000..a35b67e7b5
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Unique.pbtxt
@@ -0,0 +1,39 @@
+op {
+ graph_op_name: "Unique"
+ in_arg {
+ name: "x"
+ description: <<END
+1-D.
+END
+ }
+ out_arg {
+ name: "y"
+ description: <<END
+1-D.
+END
+ }
+ out_arg {
+ name: "idx"
+ description: <<END
+1-D.
+END
+ }
+ summary: "Finds unique elements in a 1-D tensor."
+ description: <<END
+This operation returns a tensor `y` containing all of the unique elements of `x`
+sorted in the same order that they occur in `x`. This operation also returns a
+tensor `idx` the same size as `x` that contains the index of each value of `x`
+in the unique output `y`. In other words:
+
+`y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`
+
+For example:
+
+```
+# tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
+y, idx = unique(x)
+y ==> [1, 2, 4, 7, 8]
+idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
+```
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_UniqueWithCounts.pbtxt b/tensorflow/core/api_def/base_api/api_def_UniqueWithCounts.pbtxt
new file mode 100644
index 0000000000..02d670644f
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_UniqueWithCounts.pbtxt
@@ -0,0 +1,47 @@
+op {
+ graph_op_name: "UniqueWithCounts"
+ in_arg {
+ name: "x"
+ description: <<END
+1-D.
+END
+ }
+ out_arg {
+ name: "y"
+ description: <<END
+1-D.
+END
+ }
+ out_arg {
+ name: "idx"
+ description: <<END
+1-D.
+END
+ }
+ out_arg {
+ name: "count"
+ description: <<END
+1-D.
+END
+ }
+ summary: "Finds unique elements in a 1-D tensor."
+ description: <<END
+This operation returns a tensor `y` containing all of the unique elements of `x`
+sorted in the same order that they occur in `x`. This operation also returns a
+tensor `idx` the same size as `x` that contains the index of each value of `x`
+in the unique output `y`. Finally, it returns a third tensor `count` that
+contains the count of each element of `y` in `x`. In other words:
+
+`y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`
+
+For example:
+
+```
+# tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
+y, idx, count = unique_with_counts(x)
+y ==> [1, 2, 4, 7, 8]
+idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
+count ==> [2, 1, 3, 1, 2]
+```
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Unpack.pbtxt b/tensorflow/core/api_def/base_api/api_def_Unpack.pbtxt
new file mode 100644
index 0000000000..716aa73956
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Unpack.pbtxt
@@ -0,0 +1,40 @@
+op {
+ graph_op_name: "Unpack"
+ endpoint {
+ name: "Unstack"
+ }
+ in_arg {
+ name: "value"
+ description: <<END
+1-D or higher, with `axis` dimension size equal to `num`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+The list of tensors unpacked from `value`.
+END
+ }
+ attr {
+ name: "axis"
+ description: <<END
+Dimension along which to unpack. Negative values wrap around, so the
+valid range is `[-R, R)`.
+END
+ }
+ summary: "Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors."
+ description: <<END
+Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.
+For example, given a tensor of shape `(A, B, C, D)`:
+
+If `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]`
+ and each tensor in `output` will have shape `(B, C, D)`. (Note that the
+ dimension unpacked along is gone, unlike `split`).
+
+If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]`
+ and each tensor in `output` will have shape `(A, C, D)`.
+Etc.
+
+This is the opposite of `pack`.
+END
+}
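A short sketch of the `Unstack` endpoint above (TF 1.x `tf.unstack`); chipping a `(2, 3)` tensor along axis 0 yields two rank-1 tensors of shape `(3,)`:

```
import tensorflow as tf

value = tf.constant([[1, 2, 3], [4, 5, 6]])
rows = tf.unstack(value, axis=0)  # the unpacked dimension disappears
with tf.Session() as sess:
    print(sess.run(rows))  # [array([1, 2, 3]), array([4, 5, 6])]
```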
diff --git a/tensorflow/core/api_def/base_api/api_def_UnsortedSegmentMax.pbtxt b/tensorflow/core/api_def/base_api/api_def_UnsortedSegmentMax.pbtxt
new file mode 100644
index 0000000000..8298d62f25
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_UnsortedSegmentMax.pbtxt
@@ -0,0 +1,36 @@
+op {
+ graph_op_name: "UnsortedSegmentMax"
+ in_arg {
+ name: "segment_ids"
+ description: <<END
+A 1-D tensor whose size is equal to the size of `data`'s
+first dimension.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Has the same shape as `data`, except for dimension 0, which
+has size `num_segments`.
+END
+ }
+ summary: "Computes the Max along segments of a tensor."
+ description: <<END
+Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
+segments.
+
+This operator is similar to the [unsorted segment sum operator](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
+Instead of computing the sum over segments, it computes the maximum
+such that:
+
+\\(output_i = \max_j data_j\\) where max is over `j` such
+that `segment_ids[j] == i`.
+
+If the maximum is empty for a given segment ID `i`, it outputs the smallest
+possible value for the specific numeric type: `output[i] = numeric_limits<T>::min()`.
+
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="https://www.tensorflow.org/images/UnsortedSegmentMax.png" alt>
+</div>
+END
+}
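A minimal sketch, assuming the TF 1.x endpoint `tf.unsorted_segment_max`; note the empty segment receiving `numeric_limits<int32>::min()`:

```
import tensorflow as tf

data = tf.constant([1, 5, 3, 4])
segment_ids = tf.constant([0, 0, 1, 1])
out = tf.unsorted_segment_max(data, segment_ids, num_segments=3)
with tf.Session() as sess:
    print(sess.run(out))  # [5, 4, -2147483648] -- segment 2 is empty
```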
diff --git a/tensorflow/core/api_def/base_api/api_def_UnsortedSegmentSum.pbtxt b/tensorflow/core/api_def/base_api/api_def_UnsortedSegmentSum.pbtxt
new file mode 100644
index 0000000000..0a3355cdbc
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_UnsortedSegmentSum.pbtxt
@@ -0,0 +1,36 @@
+op {
+ graph_op_name: "UnsortedSegmentSum"
+ in_arg {
+ name: "segment_ids"
+ description: <<END
+A tensor whose shape is a prefix of `data.shape`.
+END
+ }
+ out_arg {
+ name: "output"
+ description: <<END
+Has the same shape as `data`, except for the first `segment_ids.rank`
+dimensions, which are replaced with a single dimension of size
+`num_segments`.
+END
+ }
+ summary: "Computes the sum along segments of a tensor."
+ description: <<END
+Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
+segments.
+
+Computes a tensor such that
+`output[i] = sum_{j...} data[j...]` where the sum is over tuples `j...` such
+that `segment_ids[j...] == i`. Unlike `SegmentSum`, `segment_ids`
+need not be sorted and need not cover all values in the full
+range of valid values.
+
+If the sum is empty for a given segment ID `i`, `output[i] = 0`.
+
+`num_segments` should equal the number of distinct segment IDs.
+
+<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+<img style="width:100%" src="https://www.tensorflow.org/images/UnsortedSegmentSum.png" alt>
+</div>
+END
+}
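A minimal sketch, assuming the TF 1.x endpoint `tf.unsorted_segment_sum`; unlike `SegmentSum`, the IDs may arrive in any order, and empty segments sum to 0:

```
import tensorflow as tf

data = tf.constant([1, 2, 3, 4])
segment_ids = tf.constant([1, 0, 1, 1])  # unsorted on purpose
out = tf.unsorted_segment_sum(data, segment_ids, num_segments=3)
with tf.Session() as sess:
    print(sess.run(out))  # [2, 8, 0] -- segment 2 is empty
```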
diff --git a/tensorflow/core/api_def/base_api/api_def_Unstage.pbtxt b/tensorflow/core/api_def/base_api/api_def_Unstage.pbtxt
new file mode 100644
index 0000000000..2e18658430
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Unstage.pbtxt
@@ -0,0 +1,8 @@
+op {
+ graph_op_name: "Unstage"
+ summary: "Op is similar to a lightweight Dequeue."
+ description: <<END
+The basic functionality is similar to dequeue with many fewer
+capabilities and options. This Op is optimized for performance.
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_V.pbtxt b/tensorflow/core/api_def/base_api/api_def_V.pbtxt
deleted file mode 100644
index 31cc147900..0000000000
--- a/tensorflow/core/api_def/base_api/api_def_V.pbtxt
+++ /dev/null
@@ -1,19 +0,0 @@
-op {
- graph_op_name: "Variable"
- endpoint {
- name: "Variable"
- }
- summary: "Use VariableV2 instead."
-}
-op {
- graph_op_name: "VariableV2"
- endpoint {
- name: "VariableV2"
- }
- summary: "Holds state in the form of a tensor that persists across steps."
- description: <<END
-Outputs a ref to the tensor state so it may be read or modified.
-TODO(zhifengc/mrry): Adds a pointer to a more detail document
-about sharing states in tensorflow.
-END
-}
diff --git a/tensorflow/core/api_def/base_api/api_def_VarHandleOp.pbtxt b/tensorflow/core/api_def/base_api/api_def_VarHandleOp.pbtxt
new file mode 100644
index 0000000000..0a4caa06bd
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_VarHandleOp.pbtxt
@@ -0,0 +1,29 @@
+op {
+ graph_op_name: "VarHandleOp"
+ attr {
+ name: "container"
+ description: <<END
+The container this variable is placed in.
+END
+ }
+ attr {
+ name: "shared_name"
+ description: <<END
+The name by which this variable is referred to.
+END
+ }
+ attr {
+ name: "dtype"
+ description: <<END
+The type of this variable. Must agree with the dtypes
+of all ops using this variable.
+END
+ }
+ attr {
+ name: "shape"
+ description: <<END
+The (possibly partially specified) shape of this variable.
+END
+ }
+ summary: "Creates a handle to a Variable resource."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_VarIsInitializedOp.pbtxt b/tensorflow/core/api_def/base_api/api_def_VarIsInitializedOp.pbtxt
new file mode 100644
index 0000000000..a9c4cfd0b9
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_VarIsInitializedOp.pbtxt
@@ -0,0 +1,17 @@
+op {
+ graph_op_name: "VarIsInitializedOp"
+ in_arg {
+ name: "resource"
+ description: <<END
+The input resource handle.
+END
+ }
+ out_arg {
+ name: "is_initialized"
+ description: <<END
+A scalar boolean, true if the variable has been
+initialized.
+END
+ }
+ summary: "Checks whether a resource handle-based variable has been initialized."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_Variable.pbtxt b/tensorflow/core/api_def/base_api/api_def_Variable.pbtxt
new file mode 100644
index 0000000000..112ab6549f
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Variable.pbtxt
@@ -0,0 +1,5 @@
+op {
+ graph_op_name: "Variable"
+ visibility: SKIP
+ summary: "Use VariableV2 instead."
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_VariableShape.pbtxt b/tensorflow/core/api_def/base_api/api_def_VariableShape.pbtxt
new file mode 100644
index 0000000000..adc4bf08fa
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_VariableShape.pbtxt
@@ -0,0 +1,14 @@
+op {
+ graph_op_name: "VariableShape"
+ summary: "Returns the shape of the variable pointed to by `resource`."
+ description: <<END
+This operation returns a 1-D integer tensor representing the shape of the
+
+For example:
+
+```
+# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
+shape(t) ==> [2, 2, 3]
+```
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_VariableV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_VariableV2.pbtxt
new file mode 100644
index 0000000000..6341cc69f6
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_VariableV2.pbtxt
@@ -0,0 +1,44 @@
+op {
+ graph_op_name: "VariableV2"
+ endpoint {
+ name: "Variable"
+ }
+ out_arg {
+ name: "ref"
+ description: <<END
+A reference to the variable tensor.
+END
+ }
+ attr {
+ name: "shape"
+ description: <<END
+The shape of the variable tensor.
+END
+ }
+ attr {
+ name: "dtype"
+ description: <<END
+The type of elements in the variable tensor.
+END
+ }
+ attr {
+ name: "container"
+ description: <<END
+If non-empty, this variable is placed in the given container.
+Otherwise, a default container is used.
+END
+ }
+ attr {
+ name: "shared_name"
+ description: <<END
+If non-empty, this variable is named in the given bucket
+with this shared_name. Otherwise, the node name is used instead.
+END
+ }
+ summary: "Holds state in the form of a tensor that persists across steps."
+ description: <<END
+Outputs a ref to the tensor state so it may be read or modified.
+TODO(zhifengc/mrry): Add a pointer to a more detailed document
+about sharing state in TensorFlow.
+END
+}
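A sketch of the persistent-state behavior this kernel backs, using the TF 1.x `tf.Variable` wrapper; the value survives across `Session.run` calls until reassigned:

```
import tensorflow as tf

v = tf.Variable(0, name="counter")
increment = tf.assign_add(v, 1)  # mutates the variable's state
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(increment))  # 1
    print(sess.run(increment))  # 2 -- state persisted across steps
```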
diff --git a/tensorflow/core/api_def/base_api/api_def_W.pbtxt b/tensorflow/core/api_def/base_api/api_def_Where.pbtxt
index 9120fe334e..a6ea62c4cc 100644
--- a/tensorflow/core/api_def/base_api/api_def_W.pbtxt
+++ b/tensorflow/core/api_def/base_api/api_def_Where.pbtxt
@@ -1,9 +1,10 @@
op {
graph_op_name: "Where"
- endpoint {
- name: "Where"
+ in_arg {
+ name: "input"
+ rename_to: "condition"
}
- summary: "Returns locations of true values in a boolean tensor."
+ summary: "Returns locations of nonzero / true values in a tensor."
description: <<END
This operation returns the coordinates of true elements in `input`. The
coordinates are returned in a 2-D tensor where the first dimension (rows)
@@ -35,38 +36,34 @@ where(input) ==> [[0, 0, 0],
[1, 0, 1],
[1, 1, 1],
[2, 1, 1]]
+
+# `input` tensor is [[[1.5, 0.0]
+# [-0.5, 0.0]]
+# [[0.0, 0.25]
+# [0.0, 0.75]]
+# [[0.0, 0.0]
+# [0.0, 0.01]]]
+# 'input' has 5 nonzero values, so output has 5 coordinates.
+# 'input' has rank of 3, so coordinates have three indices.
+where(input) ==> [[0, 0, 0],
+ [0, 1, 0],
+ [1, 0, 1],
+ [1, 1, 1],
+ [2, 1, 1]]
+
+# `input` tensor is [[[1.5 + 0.0j, 0.0 + 0.0j]
+# [0.0 + 0.5j, 0.0 + 0.0j]]
+# [[0.0 + 0.0j, 0.25 + 1.5j]
+# [0.0 + 0.0j, 0.75 + 0.0j]]
+# [[0.0 + 0.0j, 0.0 + 0.0j]
+# [0.0 + 0.0j, 0.01 + 0.0j]]]
+# 'input' has 5 nonzero magnitude values, so output has 5 coordinates.
+# 'input' has rank of 3, so coordinates have three indices.
+where(input) ==> [[0, 0, 0],
+ [0, 1, 0],
+ [1, 0, 1],
+ [1, 1, 1],
+ [2, 1, 1]]
```
END
}
-op {
- graph_op_name: "WholeFileReader"
- endpoint {
- name: "WholeFileReader"
- }
- summary: "A Reader that outputs the entire contents of a file as a value."
- description: <<END
-To use, enqueue filenames in a Queue. The output of ReaderRead will
-be a filename (key) and the contents of that file (value).
-END
-}
-op {
- graph_op_name: "WholeFileReaderV2"
- endpoint {
- name: "WholeFileReaderV2"
- }
- summary: "A Reader that outputs the entire contents of a file as a value."
- description: <<END
-To use, enqueue filenames in a Queue. The output of ReaderRead will
-be a filename (key) and the contents of that file (value).
-END
-}
-op {
- graph_op_name: "WriteFile"
- endpoint {
- name: "WriteFile"
- }
- summary: "Writes contents to the file at input filename. Creates file and recursively"
- description: <<END
-creates directory if not existing.
-END
-}
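To illustrate the `Where` semantics updated above, a minimal TF 1.x sketch of the single-argument `tf.where` form, which returns one coordinate row per true (or, per this change, nonzero) element:

```
import tensorflow as tf

condition = tf.constant([[True, False], [False, True]])
coords = tf.where(condition)  # shape: [num_true, rank(condition)]
with tf.Session() as sess:
    print(sess.run(coords))  # [[0 0]
                             #  [1 1]]
```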
diff --git a/tensorflow/core/api_def/base_api/api_def_WholeFileReader.pbtxt b/tensorflow/core/api_def/base_api/api_def_WholeFileReader.pbtxt
new file mode 100644
index 0000000000..32180e0737
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_WholeFileReader.pbtxt
@@ -0,0 +1,29 @@
+op {
+ graph_op_name: "WholeFileReader"
+ visibility: SKIP
+ out_arg {
+ name: "reader_handle"
+ description: <<END
+The handle to reference the Reader.
+END
+ }
+ attr {
+ name: "container"
+ description: <<END
+If non-empty, this reader is placed in the given container.
+Otherwise, a default container is used.
+END
+ }
+ attr {
+ name: "shared_name"
+ description: <<END
+If non-empty, this reader is named in the given bucket
+with this shared_name. Otherwise, the node name is used instead.
+END
+ }
+ summary: "A Reader that outputs the entire contents of a file as a value."
+ description: <<END
+To use, enqueue filenames in a Queue. The output of ReaderRead will
+be a filename (key) and the contents of that file (value).
+END
+}
diff --git a/tensorflow/core/api_def/base_api/api_def_WholeFileReaderV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_WholeFileReaderV2.pbtxt
new file mode 100644
index 0000000000..f9063f9588
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_WholeFileReaderV2.pbtxt
@@ -0,0 +1,31 @@
+op {
+ graph_op_name: "WholeFileReaderV2"
+ endpoint {
+ name: "WholeFileReader"
+ }
+ out_arg {
+ name: "reader_handle"
+ description: <<END
+The handle to reference the Reader.
+END
+ }
+ attr {
+ name: "container"
+ description: <<END
+If non-empty, this reader is placed in the given container.
+Otherwise, a default container is used.
+END
+ }
+ attr {
+ name: "shared_name"
+ description: <<END
+If non-empty, this reader is named in the given bucket
+with this shared_name. Otherwise, the node name is used instead.
+END
+ }
+ summary: "A Reader that outputs the entire contents of a file as a value."
+ description: <<END
+To use, enqueue filenames in a Queue. The output of ReaderRead will
+be a filename (key) and the contents of that file (value).
+END
+}
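A sketch of the queue-based pattern both reader variants describe, assuming a TF 1.x input pipeline and two hypothetical files `a.txt` and `b.txt`:

```
import tensorflow as tf

# `a.txt` and `b.txt` are placeholder filenames for this example.
queue = tf.train.string_input_producer(["a.txt", "b.txt"], num_epochs=1)
reader = tf.WholeFileReader()
key, value = reader.read(queue)  # key: filename, value: file contents
with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())  # for the epoch counter
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    print(sess.run([key, value]))
    coord.request_stop()
    coord.join(threads)
```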
diff --git a/tensorflow/core/api_def/base_api/api_def_WriteFile.pbtxt b/tensorflow/core/api_def/base_api/api_def_WriteFile.pbtxt
new file mode 100644
index 0000000000..28b09c9bf1
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_WriteFile.pbtxt
@@ -0,0 +1,19 @@
+op {
+ graph_op_name: "WriteFile"
+ in_arg {
+ name: "filename"
+ description: <<END
+Scalar. The name of the file to which we write the contents.
+END
+ }
+ in_arg {
+ name: "contents"
+ description: <<END
+Scalar. The content to be written to the output file.
+END
+ }
+ summary: "Writes contents to the file at input filename. Creates file and recursively"
+ description: <<END
+creates directory if not existing.
+END
+}
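A minimal sketch, assuming the TF 1.x endpoint `tf.write_file` and a hypothetical path `/tmp/example.txt`:

```
import tensorflow as tf

# Parent directories are created recursively if they do not exist.
write_op = tf.write_file("/tmp/example.txt", "hello")
with tf.Session() as sess:
    sess.run(write_op)
```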
diff --git a/tensorflow/core/api_def/base_api/api_def_Z.pbtxt b/tensorflow/core/api_def/base_api/api_def_Z.pbtxt
deleted file mode 100644
index f83fef054c..0000000000
--- a/tensorflow/core/api_def/base_api/api_def_Z.pbtxt
+++ /dev/null
@@ -1,27 +0,0 @@
-op {
- graph_op_name: "ZerosLike"
- endpoint {
- name: "ZerosLike"
- }
- summary: "Returns a tensor of zeros with the same shape and type as x."
-}
-op {
- graph_op_name: "Zeta"
- endpoint {
- name: "Zeta"
- }
- summary: "Compute the Hurwitz zeta function \\\\(\\zeta(x, q)\\\\)."
- description: <<END
-The Hurwitz zeta function is defined as:
-
-
-\\(\zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}\\)
-END
-}
-op {
- graph_op_name: "ZipDataset"
- endpoint {
- name: "ZipDataset"
- }
- summary: "Creates a dataset that zips together `input_datasets`."
-}
diff --git a/tensorflow/core/api_def/base_api/api_def_ZerosLike.pbtxt b/tensorflow/core/api_def/base_api/api_def_ZerosLike.pbtxt
new file mode 100644
index 0000000000..37c2d5b534
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ZerosLike.pbtxt
@@ -0,0 +1,16 @@
+op {
+ graph_op_name: "ZerosLike"
+ in_arg {
+ name: "x"
+ description: <<END
+A tensor of type T.
+END
+ }
+ out_arg {
+ name: "y"
+ description: <<END
+A tensor of the same shape and type as `x`, but filled with zeros.
+END
+ }
+ summary: "Returns a tensor of zeros with the same shape and type as x."
+}
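A one-line sketch of the `tf.zeros_like` endpoint; the output keeps `x`'s shape and dtype:

```
import tensorflow as tf

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
y = tf.zeros_like(x)  # same shape/dtype as x, all zeros
with tf.Session() as sess:
    print(sess.run(y))  # [[0. 0.]
                        #  [0. 0.]]
```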
diff --git a/tensorflow/core/api_def/base_api/api_def_Zeta.pbtxt b/tensorflow/core/api_def/base_api/api_def_Zeta.pbtxt
new file mode 100644
index 0000000000..c02860a16a
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_Zeta.pbtxt
@@ -0,0 +1,10 @@
+op {
+ graph_op_name: "Zeta"
+ summary: "Compute the Hurwitz zeta function \\\\(\\zeta(x, q)\\\\)."
+ description: <<END
+The Hurwitz zeta function is defined as:
+
+
+\\(\zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}\\)
+END
+}
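A quick numerical check, assuming the TF 1.x endpoint `tf.zeta(x, q)`: with q = 1 the Hurwitz zeta reduces to the Riemann zeta, so zeta(2, 1) = pi^2 / 6:

```
import tensorflow as tf

out = tf.zeta(tf.constant(2.0), tf.constant(1.0))
with tf.Session() as sess:
    print(sess.run(out))  # ~1.6449341 == pi**2 / 6
```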
diff --git a/tensorflow/core/api_def/base_api/api_def_ZipDataset.pbtxt b/tensorflow/core/api_def/base_api/api_def_ZipDataset.pbtxt
new file mode 100644
index 0000000000..7495693ccc
--- /dev/null
+++ b/tensorflow/core/api_def/base_api/api_def_ZipDataset.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "ZipDataset"
+ summary: "Creates a dataset that zips together `input_datasets`."
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_A.pbtxt b/tensorflow/core/api_def/python_api/api_def_A.pbtxt
deleted file mode 100644
index df9b3ad0b6..0000000000
--- a/tensorflow/core/api_def/python_api/api_def_A.pbtxt
+++ /dev/null
@@ -1,56 +0,0 @@
-op {
- graph_op_name: "Abs"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "AddManySparseToTensorsMap"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "AddN"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "AddSparseToTensorsMap"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "AdjustContrastv2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "All"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "AllCandidateSampler"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "Any"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "Assert"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "AudioSummary"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "AudioSummaryV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "AvgPool"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "AvgPool3DGrad"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "AvgPoolGrad"
- visibility: HIDDEN
-}
diff --git a/tensorflow/core/api_def/python_api/api_def_Abs.pbtxt b/tensorflow/core/api_def/python_api/api_def_Abs.pbtxt
new file mode 100644
index 0000000000..1f21fae28b
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Abs.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Abs"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_AccumulateNV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_AccumulateNV2.pbtxt
new file mode 100644
index 0000000000..a92ff5a406
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_AccumulateNV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "AccumulateNV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_AddManySparseToTensorsMap.pbtxt b/tensorflow/core/api_def/python_api/api_def_AddManySparseToTensorsMap.pbtxt
new file mode 100644
index 0000000000..7ece23fd65
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_AddManySparseToTensorsMap.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "AddManySparseToTensorsMap"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_AddN.pbtxt b/tensorflow/core/api_def/python_api/api_def_AddN.pbtxt
new file mode 100644
index 0000000000..8cc22ad4dc
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_AddN.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "AddN"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_AddSparseToTensorsMap.pbtxt b/tensorflow/core/api_def/python_api/api_def_AddSparseToTensorsMap.pbtxt
new file mode 100644
index 0000000000..c4446bba28
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_AddSparseToTensorsMap.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "AddSparseToTensorsMap"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_AddV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_AddV2.pbtxt
new file mode 100644
index 0000000000..77c879c6b3
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_AddV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "AddV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_AdjustContrastv2.pbtxt b/tensorflow/core/api_def/python_api/api_def_AdjustContrastv2.pbtxt
new file mode 100644
index 0000000000..889d147406
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_AdjustContrastv2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "AdjustContrastv2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_All.pbtxt b/tensorflow/core/api_def/python_api/api_def_All.pbtxt
new file mode 100644
index 0000000000..ca780f037f
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_All.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "All"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_AllCandidateSampler.pbtxt b/tensorflow/core/api_def/python_api/api_def_AllCandidateSampler.pbtxt
new file mode 100644
index 0000000000..200ae0ae49
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_AllCandidateSampler.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "AllCandidateSampler"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Any.pbtxt b/tensorflow/core/api_def/python_api/api_def_Any.pbtxt
new file mode 100644
index 0000000000..4afa8acecb
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Any.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Any"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Assert.pbtxt b/tensorflow/core/api_def/python_api/api_def_Assert.pbtxt
new file mode 100644
index 0000000000..12e27ee0bc
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Assert.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Assert"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_AudioSummary.pbtxt b/tensorflow/core/api_def/python_api/api_def_AudioSummary.pbtxt
new file mode 100644
index 0000000000..94da1e06ea
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_AudioSummary.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "AudioSummary"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_AudioSummaryV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_AudioSummaryV2.pbtxt
new file mode 100644
index 0000000000..1715576d09
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_AudioSummaryV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "AudioSummaryV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_AvgPool.pbtxt b/tensorflow/core/api_def/python_api/api_def_AvgPool.pbtxt
new file mode 100644
index 0000000000..c58d6c6039
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_AvgPool.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "AvgPool"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_AvgPool3DGrad.pbtxt b/tensorflow/core/api_def/python_api/api_def_AvgPool3DGrad.pbtxt
new file mode 100644
index 0000000000..5e4049faf4
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_AvgPool3DGrad.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "AvgPool3DGrad"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_B.pbtxt b/tensorflow/core/api_def/python_api/api_def_B.pbtxt
deleted file mode 100644
index 49c74ccad2..0000000000
--- a/tensorflow/core/api_def/python_api/api_def_B.pbtxt
+++ /dev/null
@@ -1,142 +0,0 @@
-op {
- graph_op_name: "Barrier"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "BarrierClose"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "BarrierIncompleteSize"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "BarrierInsertMany"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "BarrierReadySize"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "BarrierTakeMany"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "BatchCholesky"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "BatchCholeskyGrad"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "BatchFFT"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "BatchFFT2D"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "BatchFFT3D"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "BatchIFFT"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "BatchIFFT2D"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "BatchIFFT3D"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "BatchMatMul"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "BatchMatrixDeterminant"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "BatchMatrixInverse"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "BatchMatrixSolve"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "BatchMatrixSolveLs"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "BatchMatrixTriangularSolve"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "BatchNormWithGlobalNormalization"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "BatchNormWithGlobalNormalizationGrad"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "BatchSelfAdjointEig"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "BatchSelfAdjointEigV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "BatchSvd"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "BatchToSpace"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "BiasAdd"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "BiasAddV1"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "BitwiseAnd"
- endpoint {
- name: "bitwise.bitwise_and"
- }
-}
-op {
- graph_op_name: "BitwiseOr"
- endpoint {
- name: "bitwise.bitwise_or"
- }
-}
-op {
- graph_op_name: "BitwiseXor"
- endpoint {
- name: "bitwise.bitwise_xor"
- }
-}
-op {
- graph_op_name: "BroadcastArgs"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "BroadcastGradientArgs"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "Bucketize"
- visibility: HIDDEN
-}
diff --git a/tensorflow/core/api_def/python_api/api_def_Barrier.pbtxt b/tensorflow/core/api_def/python_api/api_def_Barrier.pbtxt
new file mode 100644
index 0000000000..b6463fcf61
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Barrier.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Barrier"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_BarrierClose.pbtxt b/tensorflow/core/api_def/python_api/api_def_BarrierClose.pbtxt
new file mode 100644
index 0000000000..d903a2e29e
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_BarrierClose.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "BarrierClose"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_BarrierIncompleteSize.pbtxt b/tensorflow/core/api_def/python_api/api_def_BarrierIncompleteSize.pbtxt
new file mode 100644
index 0000000000..e9c5a8e7fe
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_BarrierIncompleteSize.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "BarrierIncompleteSize"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_BarrierInsertMany.pbtxt b/tensorflow/core/api_def/python_api/api_def_BarrierInsertMany.pbtxt
new file mode 100644
index 0000000000..3c7b060d41
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_BarrierInsertMany.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "BarrierInsertMany"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_BarrierReadySize.pbtxt b/tensorflow/core/api_def/python_api/api_def_BarrierReadySize.pbtxt
new file mode 100644
index 0000000000..07729e0704
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_BarrierReadySize.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "BarrierReadySize"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_BarrierTakeMany.pbtxt b/tensorflow/core/api_def/python_api/api_def_BarrierTakeMany.pbtxt
new file mode 100644
index 0000000000..de6448e3fe
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_BarrierTakeMany.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "BarrierTakeMany"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_BatchCholesky.pbtxt b/tensorflow/core/api_def/python_api/api_def_BatchCholesky.pbtxt
new file mode 100644
index 0000000000..83241f8e8b
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_BatchCholesky.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "BatchCholesky"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_BatchCholeskyGrad.pbtxt b/tensorflow/core/api_def/python_api/api_def_BatchCholeskyGrad.pbtxt
new file mode 100644
index 0000000000..60ddfd7a26
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_BatchCholeskyGrad.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "BatchCholeskyGrad"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_BatchFFT.pbtxt b/tensorflow/core/api_def/python_api/api_def_BatchFFT.pbtxt
new file mode 100644
index 0000000000..f735280687
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_BatchFFT.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "BatchFFT"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_BatchFFT2D.pbtxt b/tensorflow/core/api_def/python_api/api_def_BatchFFT2D.pbtxt
new file mode 100644
index 0000000000..a7520e86d4
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_BatchFFT2D.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "BatchFFT2D"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_BatchFFT3D.pbtxt b/tensorflow/core/api_def/python_api/api_def_BatchFFT3D.pbtxt
new file mode 100644
index 0000000000..27bc32046b
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_BatchFFT3D.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "BatchFFT3D"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_BatchIFFT.pbtxt b/tensorflow/core/api_def/python_api/api_def_BatchIFFT.pbtxt
new file mode 100644
index 0000000000..7f3bb2ba5d
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_BatchIFFT.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "BatchIFFT"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_BatchIFFT2D.pbtxt b/tensorflow/core/api_def/python_api/api_def_BatchIFFT2D.pbtxt
new file mode 100644
index 0000000000..b944924595
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_BatchIFFT2D.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "BatchIFFT2D"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_BatchIFFT3D.pbtxt b/tensorflow/core/api_def/python_api/api_def_BatchIFFT3D.pbtxt
new file mode 100644
index 0000000000..13cccda1d2
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_BatchIFFT3D.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "BatchIFFT3D"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_BatchMatMul.pbtxt b/tensorflow/core/api_def/python_api/api_def_BatchMatMul.pbtxt
new file mode 100644
index 0000000000..b3db197c26
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_BatchMatMul.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "BatchMatMul"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_BatchMatrixDeterminant.pbtxt b/tensorflow/core/api_def/python_api/api_def_BatchMatrixDeterminant.pbtxt
new file mode 100644
index 0000000000..202b0d149b
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_BatchMatrixDeterminant.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "BatchMatrixDeterminant"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_BatchMatrixInverse.pbtxt b/tensorflow/core/api_def/python_api/api_def_BatchMatrixInverse.pbtxt
new file mode 100644
index 0000000000..3fa68bdd3e
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_BatchMatrixInverse.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "BatchMatrixInverse"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_BatchMatrixSolve.pbtxt b/tensorflow/core/api_def/python_api/api_def_BatchMatrixSolve.pbtxt
new file mode 100644
index 0000000000..a458423e38
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_BatchMatrixSolve.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "BatchMatrixSolve"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_BatchMatrixSolveLs.pbtxt b/tensorflow/core/api_def/python_api/api_def_BatchMatrixSolveLs.pbtxt
new file mode 100644
index 0000000000..61b4ca3999
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_BatchMatrixSolveLs.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "BatchMatrixSolveLs"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_BatchMatrixTriangularSolve.pbtxt b/tensorflow/core/api_def/python_api/api_def_BatchMatrixTriangularSolve.pbtxt
new file mode 100644
index 0000000000..28e6742595
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_BatchMatrixTriangularSolve.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "BatchMatrixTriangularSolve"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_BatchNormWithGlobalNormalization.pbtxt b/tensorflow/core/api_def/python_api/api_def_BatchNormWithGlobalNormalization.pbtxt
new file mode 100644
index 0000000000..e7a042bc61
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_BatchNormWithGlobalNormalization.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "BatchNormWithGlobalNormalization"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_BatchNormWithGlobalNormalizationGrad.pbtxt b/tensorflow/core/api_def/python_api/api_def_BatchNormWithGlobalNormalizationGrad.pbtxt
new file mode 100644
index 0000000000..e92f3a30f4
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_BatchNormWithGlobalNormalizationGrad.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "BatchNormWithGlobalNormalizationGrad"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_BatchSelfAdjointEig.pbtxt b/tensorflow/core/api_def/python_api/api_def_BatchSelfAdjointEig.pbtxt
new file mode 100644
index 0000000000..26fef1c4b4
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_BatchSelfAdjointEig.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "BatchSelfAdjointEig"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_BatchSelfAdjointEigV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_BatchSelfAdjointEigV2.pbtxt
new file mode 100644
index 0000000000..660523a8c4
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_BatchSelfAdjointEigV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "BatchSelfAdjointEigV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_BatchSvd.pbtxt b/tensorflow/core/api_def/python_api/api_def_BatchSvd.pbtxt
new file mode 100644
index 0000000000..927f5483a9
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_BatchSvd.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "BatchSvd"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_BatchToSpace.pbtxt b/tensorflow/core/api_def/python_api/api_def_BatchToSpace.pbtxt
new file mode 100644
index 0000000000..c106bb1367
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_BatchToSpace.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "BatchToSpace"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_BiasAdd.pbtxt b/tensorflow/core/api_def/python_api/api_def_BiasAdd.pbtxt
new file mode 100644
index 0000000000..c2397ac0ac
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_BiasAdd.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "BiasAdd"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_BiasAddV1.pbtxt b/tensorflow/core/api_def/python_api/api_def_BiasAddV1.pbtxt
new file mode 100644
index 0000000000..93dcabecb8
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_BiasAddV1.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "BiasAddV1"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_BitwiseAnd.pbtxt b/tensorflow/core/api_def/python_api/api_def_BitwiseAnd.pbtxt
new file mode 100644
index 0000000000..288a3f5fc2
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_BitwiseAnd.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "BitwiseAnd"
+ endpoint {
+ name: "bitwise.bitwise_and"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_BitwiseOr.pbtxt b/tensorflow/core/api_def/python_api/api_def_BitwiseOr.pbtxt
new file mode 100644
index 0000000000..150dbf6bfd
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_BitwiseOr.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "BitwiseOr"
+ endpoint {
+ name: "bitwise.bitwise_or"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_BitwiseXor.pbtxt b/tensorflow/core/api_def/python_api/api_def_BitwiseXor.pbtxt
new file mode 100644
index 0000000000..4f7c6fb5fc
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_BitwiseXor.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "BitwiseXor"
+ endpoint {
+ name: "bitwise.bitwise_xor"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_BroadcastArgs.pbtxt b/tensorflow/core/api_def/python_api/api_def_BroadcastArgs.pbtxt
new file mode 100644
index 0000000000..5933fdfea1
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_BroadcastArgs.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "BroadcastArgs"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Bucketize.pbtxt b/tensorflow/core/api_def/python_api/api_def_Bucketize.pbtxt
new file mode 100644
index 0000000000..49fbe175ae
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Bucketize.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Bucketize"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_C.pbtxt b/tensorflow/core/api_def/python_api/api_def_C.pbtxt
deleted file mode 100644
index 42ed24b133..0000000000
--- a/tensorflow/core/api_def/python_api/api_def_C.pbtxt
+++ /dev/null
@@ -1,59 +0,0 @@
-op {
- graph_op_name: "CTCBeamSearchDecoder"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "CTCGreedyDecoder"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "CTCLoss"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "Cholesky"
- endpoint {
- name: "cholesky"
- }
- endpoint {
- name: "linalg.cholesky"
- }
-}
-op {
- graph_op_name: "Complex"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "ComplexAbs"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "ComputeAccidentalHits"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "Concat"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "ConcatOffset"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "ConcatV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "Conj"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "Const"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "CropAndResize"
- endpoint {
- name: "image.crop_and_resize"
- }
-}
diff --git a/tensorflow/core/api_def/python_api/api_def_CTCBeamSearchDecoder.pbtxt b/tensorflow/core/api_def/python_api/api_def_CTCBeamSearchDecoder.pbtxt
new file mode 100644
index 0000000000..4cc4ad05aa
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_CTCBeamSearchDecoder.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "CTCBeamSearchDecoder"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_CTCGreedyDecoder.pbtxt b/tensorflow/core/api_def/python_api/api_def_CTCGreedyDecoder.pbtxt
new file mode 100644
index 0000000000..4b540add1f
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_CTCGreedyDecoder.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "CTCGreedyDecoder"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_CTCLoss.pbtxt b/tensorflow/core/api_def/python_api/api_def_CTCLoss.pbtxt
new file mode 100644
index 0000000000..a0c6bcd394
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_CTCLoss.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "CTCLoss"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Cholesky.pbtxt b/tensorflow/core/api_def/python_api/api_def_Cholesky.pbtxt
new file mode 100644
index 0000000000..2676c92bfb
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Cholesky.pbtxt
@@ -0,0 +1,9 @@
+op {
+ graph_op_name: "Cholesky"
+ endpoint {
+ name: "cholesky"
+ }
+ endpoint {
+ name: "linalg.cholesky"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Complex.pbtxt b/tensorflow/core/api_def/python_api/api_def_Complex.pbtxt
new file mode 100644
index 0000000000..b9ec8059f7
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Complex.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Complex"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_ComplexAbs.pbtxt b/tensorflow/core/api_def/python_api/api_def_ComplexAbs.pbtxt
new file mode 100644
index 0000000000..77a8a44872
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_ComplexAbs.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "ComplexAbs"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_ComputeAccidentalHits.pbtxt b/tensorflow/core/api_def/python_api/api_def_ComputeAccidentalHits.pbtxt
new file mode 100644
index 0000000000..744949639c
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_ComputeAccidentalHits.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "ComputeAccidentalHits"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Concat.pbtxt b/tensorflow/core/api_def/python_api/api_def_Concat.pbtxt
new file mode 100644
index 0000000000..503e87cd6c
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Concat.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Concat"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_ConcatOffset.pbtxt b/tensorflow/core/api_def/python_api/api_def_ConcatOffset.pbtxt
new file mode 100644
index 0000000000..d1bcb77e00
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_ConcatOffset.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "ConcatOffset"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_ConcatV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_ConcatV2.pbtxt
new file mode 100644
index 0000000000..d5b5321fdc
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_ConcatV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "ConcatV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Conj.pbtxt b/tensorflow/core/api_def/python_api/api_def_Conj.pbtxt
new file mode 100644
index 0000000000..c36b1f7fad
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Conj.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Conj"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_ConjugateTranspose.pbtxt b/tensorflow/core/api_def/python_api/api_def_ConjugateTranspose.pbtxt
new file mode 100644
index 0000000000..6a8de53e73
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_ConjugateTranspose.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "ConjugateTranspose"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Const.pbtxt b/tensorflow/core/api_def/python_api/api_def_Const.pbtxt
new file mode 100644
index 0000000000..95d162ac41
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Const.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Const"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_CropAndResize.pbtxt b/tensorflow/core/api_def/python_api/api_def_CropAndResize.pbtxt
new file mode 100644
index 0000000000..ce65f8172d
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_CropAndResize.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "CropAndResize"
+ endpoint {
+ name: "image.crop_and_resize"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_D.pbtxt b/tensorflow/core/api_def/python_api/api_def_D.pbtxt
deleted file mode 100644
index c73982aed0..0000000000
--- a/tensorflow/core/api_def/python_api/api_def_D.pbtxt
+++ /dev/null
@@ -1,74 +0,0 @@
-op {
- graph_op_name: "DebugGradientIdentity"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "DecodeAndCropJpeg"
- endpoint {
- name: "image.decode_and_crop_jpeg"
- }
-}
-op {
- graph_op_name: "DecodeBmp"
- endpoint {
- name: "image.decode_bmp"
- }
-}
-op {
- graph_op_name: "DecodeCSV"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "DecodeGif"
- endpoint {
- name: "image.decode_gif"
- }
-}
-op {
- graph_op_name: "DecodeJpeg"
- endpoint {
- name: "image.decode_jpeg"
- }
-}
-op {
- graph_op_name: "DecodePng"
- endpoint {
- name: "image.decode_png"
- }
-}
-op {
- graph_op_name: "DeleteSessionTensor"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "DepthwiseConv2dNative"
- endpoint {
- name: "nn.depthwise_conv2d_native"
- }
-}
-op {
- graph_op_name: "DepthwiseConv2dNativeBackpropFilter"
- endpoint {
- name: "nn.depthwise_conv2d_native_backprop_filter"
- }
-}
-op {
- graph_op_name: "DepthwiseConv2dNativeBackpropInput"
- endpoint {
- name: "nn.depthwise_conv2d_native_backprop_input"
- }
-}
-op {
- graph_op_name: "DeserializeManySparse"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "DestroyTemporaryVariable"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "DrawBoundingBoxes"
- endpoint {
- name: "image.draw_bounding_boxes"
- }
-}
diff --git a/tensorflow/core/api_def/python_api/api_def_DebugGradientIdentity.pbtxt b/tensorflow/core/api_def/python_api/api_def_DebugGradientIdentity.pbtxt
new file mode 100644
index 0000000000..7d50c5c868
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_DebugGradientIdentity.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "DebugGradientIdentity"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_DecodeAndCropJpeg.pbtxt b/tensorflow/core/api_def/python_api/api_def_DecodeAndCropJpeg.pbtxt
new file mode 100644
index 0000000000..fbe9c88253
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_DecodeAndCropJpeg.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "DecodeAndCropJpeg"
+ endpoint {
+ name: "image.decode_and_crop_jpeg"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_DecodeBmp.pbtxt b/tensorflow/core/api_def/python_api/api_def_DecodeBmp.pbtxt
new file mode 100644
index 0000000000..573d83f373
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_DecodeBmp.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "DecodeBmp"
+ endpoint {
+ name: "image.decode_bmp"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_DecodeCSV.pbtxt b/tensorflow/core/api_def/python_api/api_def_DecodeCSV.pbtxt
new file mode 100644
index 0000000000..21ef77e381
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_DecodeCSV.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "DecodeCSV"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_DecodeGif.pbtxt b/tensorflow/core/api_def/python_api/api_def_DecodeGif.pbtxt
new file mode 100644
index 0000000000..eed64df79c
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_DecodeGif.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "DecodeGif"
+ endpoint {
+ name: "image.decode_gif"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_DecodeJpeg.pbtxt b/tensorflow/core/api_def/python_api/api_def_DecodeJpeg.pbtxt
new file mode 100644
index 0000000000..994bc4e1f4
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_DecodeJpeg.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "DecodeJpeg"
+ endpoint {
+ name: "image.decode_jpeg"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_DecodePng.pbtxt b/tensorflow/core/api_def/python_api/api_def_DecodePng.pbtxt
new file mode 100644
index 0000000000..309eec5ac3
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_DecodePng.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "DecodePng"
+ endpoint {
+ name: "image.decode_png"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_DeleteSessionTensor.pbtxt b/tensorflow/core/api_def/python_api/api_def_DeleteSessionTensor.pbtxt
new file mode 100644
index 0000000000..08bf4a80ec
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_DeleteSessionTensor.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "DeleteSessionTensor"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_DepthwiseConv2dNative.pbtxt b/tensorflow/core/api_def/python_api/api_def_DepthwiseConv2dNative.pbtxt
new file mode 100644
index 0000000000..1bb17e548d
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_DepthwiseConv2dNative.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "DepthwiseConv2dNative"
+ endpoint {
+ name: "nn.depthwise_conv2d_native"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_DepthwiseConv2dNativeBackpropFilter.pbtxt b/tensorflow/core/api_def/python_api/api_def_DepthwiseConv2dNativeBackpropFilter.pbtxt
new file mode 100644
index 0000000000..6f9df4b1a1
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_DepthwiseConv2dNativeBackpropFilter.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "DepthwiseConv2dNativeBackpropFilter"
+ endpoint {
+ name: "nn.depthwise_conv2d_native_backprop_filter"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_DepthwiseConv2dNativeBackpropInput.pbtxt b/tensorflow/core/api_def/python_api/api_def_DepthwiseConv2dNativeBackpropInput.pbtxt
new file mode 100644
index 0000000000..0bd72539e9
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_DepthwiseConv2dNativeBackpropInput.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "DepthwiseConv2dNativeBackpropInput"
+ endpoint {
+ name: "nn.depthwise_conv2d_native_backprop_input"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_DeserializeManySparse.pbtxt b/tensorflow/core/api_def/python_api/api_def_DeserializeManySparse.pbtxt
new file mode 100644
index 0000000000..fd43a05577
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_DeserializeManySparse.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "DeserializeManySparse"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_DestroyTemporaryVariable.pbtxt b/tensorflow/core/api_def/python_api/api_def_DestroyTemporaryVariable.pbtxt
new file mode 100644
index 0000000000..e51a25a2c0
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_DestroyTemporaryVariable.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "DestroyTemporaryVariable"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_DrawBoundingBoxes.pbtxt b/tensorflow/core/api_def/python_api/api_def_DrawBoundingBoxes.pbtxt
new file mode 100644
index 0000000000..54d644c013
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_DrawBoundingBoxes.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "DrawBoundingBoxes"
+ endpoint {
+ name: "image.draw_bounding_boxes"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_E.pbtxt b/tensorflow/core/api_def/python_api/api_def_E.pbtxt
deleted file mode 100644
index 236c344167..0000000000
--- a/tensorflow/core/api_def/python_api/api_def_E.pbtxt
+++ /dev/null
@@ -1,46 +0,0 @@
-op {
- graph_op_name: "EditDistance"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "Elu"
- endpoint {
- name: "nn.elu"
- }
-}
-op {
- graph_op_name: "EluGrad"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "EncodeJpeg"
- endpoint {
- name: "image.encode_jpeg"
- }
-}
-op {
- graph_op_name: "EncodePng"
- endpoint {
- name: "image.encode_png"
- }
-}
-op {
- graph_op_name: "Exit"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "ExpandDims"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "ExtractGlimpse"
- endpoint {
- name: "image.extract_glimpse"
- }
-}
-op {
- graph_op_name: "ExtractJpegShape"
- endpoint {
- name: "image.extract_jpeg_shape"
- }
-}
diff --git a/tensorflow/core/api_def/python_api/api_def_EditDistance.pbtxt b/tensorflow/core/api_def/python_api/api_def_EditDistance.pbtxt
new file mode 100644
index 0000000000..c77accf370
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_EditDistance.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "EditDistance"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Elu.pbtxt b/tensorflow/core/api_def/python_api/api_def_Elu.pbtxt
new file mode 100644
index 0000000000..15a9f6568f
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Elu.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "Elu"
+ endpoint {
+ name: "nn.elu"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_EncodeJpeg.pbtxt b/tensorflow/core/api_def/python_api/api_def_EncodeJpeg.pbtxt
new file mode 100644
index 0000000000..5c31e9d0f3
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_EncodeJpeg.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "EncodeJpeg"
+ endpoint {
+ name: "image.encode_jpeg"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_EncodePng.pbtxt b/tensorflow/core/api_def/python_api/api_def_EncodePng.pbtxt
new file mode 100644
index 0000000000..42717ba7d5
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_EncodePng.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "EncodePng"
+ endpoint {
+ name: "image.encode_png"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_ExpandDims.pbtxt b/tensorflow/core/api_def/python_api/api_def_ExpandDims.pbtxt
new file mode 100644
index 0000000000..29979dbf0a
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_ExpandDims.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "ExpandDims"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_ExtractGlimpse.pbtxt b/tensorflow/core/api_def/python_api/api_def_ExtractGlimpse.pbtxt
new file mode 100644
index 0000000000..ed8abdfcd7
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_ExtractGlimpse.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "ExtractGlimpse"
+ endpoint {
+ name: "image.extract_glimpse"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_ExtractJpegShape.pbtxt b/tensorflow/core/api_def/python_api/api_def_ExtractJpegShape.pbtxt
new file mode 100644
index 0000000000..6849a6d3fa
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_ExtractJpegShape.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "ExtractJpegShape"
+ endpoint {
+ name: "image.extract_jpeg_shape"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_F.pbtxt b/tensorflow/core/api_def/python_api/api_def_F.pbtxt
deleted file mode 100644
index a29b6a3725..0000000000
--- a/tensorflow/core/api_def/python_api/api_def_F.pbtxt
+++ /dev/null
@@ -1,73 +0,0 @@
-op {
- graph_op_name: "FFT"
- endpoint {
- name: "fft"
- }
- endpoint {
- name: "spectral.fft"
- }
-}
-op {
- graph_op_name: "FIFOQueue"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "FIFOQueueV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "Fact"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "FakeQueue"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "FixedLengthRecordReader"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "FixedLengthRecordReaderV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "FixedUnigramCandidateSampler"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "FloorDiv"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "FloorMod"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "FractionalAvgPool"
- endpoint {
- name: "nn.fractional_avg_pool"
- }
-}
-op {
- graph_op_name: "FractionalAvgPoolGrad"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "FractionalMaxPool"
- endpoint {
- name: "nn.fractional_max_pool"
- }
-}
-op {
- graph_op_name: "FractionalMaxPoolGrad"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "FusedBatchNorm"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "FusedBatchNormV2"
- visibility: HIDDEN
-}
diff --git a/tensorflow/core/api_def/python_api/api_def_FFT.pbtxt b/tensorflow/core/api_def/python_api/api_def_FFT.pbtxt
new file mode 100644
index 0000000000..3bcab99415
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_FFT.pbtxt
@@ -0,0 +1,9 @@
+op {
+ graph_op_name: "FFT"
+ endpoint {
+ name: "fft"
+ }
+ endpoint {
+ name: "spectral.fft"
+ }
+}
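An op may declare more than one `endpoint`, as FFT does above; each name becomes an alias in the Python API, so the same kernel is reachable as both tf.fft and tf.spectral.fft. A small sketch under the same TF 1.x assumption as the earlier example:

import tensorflow as tf

x = tf.constant([1, 2, 3, 4], dtype=tf.complex64)
# Both endpoints resolve to the same underlying FFT op.
y1 = tf.fft(x)
y2 = tf.spectral.fft(x)

with tf.Session() as sess:
    a, b = sess.run([y1, y2])
    print((a == b).all())  # expected: True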
diff --git a/tensorflow/core/api_def/python_api/api_def_FIFOQueue.pbtxt b/tensorflow/core/api_def/python_api/api_def_FIFOQueue.pbtxt
new file mode 100644
index 0000000000..b51063b2cf
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_FIFOQueue.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "FIFOQueue"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_FIFOQueueV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_FIFOQueueV2.pbtxt
new file mode 100644
index 0000000000..850fe5b899
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_FIFOQueueV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "FIFOQueueV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Fact.pbtxt b/tensorflow/core/api_def/python_api/api_def_Fact.pbtxt
new file mode 100644
index 0000000000..9a8328bb84
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Fact.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Fact"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_FakeQueue.pbtxt b/tensorflow/core/api_def/python_api/api_def_FakeQueue.pbtxt
new file mode 100644
index 0000000000..0c5cc7116b
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_FakeQueue.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "FakeQueue"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_FixedLengthRecordReader.pbtxt b/tensorflow/core/api_def/python_api/api_def_FixedLengthRecordReader.pbtxt
new file mode 100644
index 0000000000..da211a3bfc
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_FixedLengthRecordReader.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "FixedLengthRecordReader"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_FixedLengthRecordReaderV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_FixedLengthRecordReaderV2.pbtxt
new file mode 100644
index 0000000000..c4606991e9
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_FixedLengthRecordReaderV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "FixedLengthRecordReaderV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_FixedUnigramCandidateSampler.pbtxt b/tensorflow/core/api_def/python_api/api_def_FixedUnigramCandidateSampler.pbtxt
new file mode 100644
index 0000000000..ca70db18ac
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_FixedUnigramCandidateSampler.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "FixedUnigramCandidateSampler"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_FloorDiv.pbtxt b/tensorflow/core/api_def/python_api/api_def_FloorDiv.pbtxt
new file mode 100644
index 0000000000..26598ab1fb
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_FloorDiv.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "FloorDiv"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_FloorMod.pbtxt b/tensorflow/core/api_def/python_api/api_def_FloorMod.pbtxt
new file mode 100644
index 0000000000..ef562e93a0
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_FloorMod.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "FloorMod"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_FractionalAvgPool.pbtxt b/tensorflow/core/api_def/python_api/api_def_FractionalAvgPool.pbtxt
new file mode 100644
index 0000000000..16ed9b56f2
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_FractionalAvgPool.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "FractionalAvgPool"
+ endpoint {
+ name: "nn.fractional_avg_pool"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_FractionalMaxPool.pbtxt b/tensorflow/core/api_def/python_api/api_def_FractionalMaxPool.pbtxt
new file mode 100644
index 0000000000..6955595208
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_FractionalMaxPool.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "FractionalMaxPool"
+ endpoint {
+ name: "nn.fractional_max_pool"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_FusedBatchNorm.pbtxt b/tensorflow/core/api_def/python_api/api_def_FusedBatchNorm.pbtxt
new file mode 100644
index 0000000000..0ac0fe7252
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_FusedBatchNorm.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "FusedBatchNorm"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_FusedBatchNormV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_FusedBatchNormV2.pbtxt
new file mode 100644
index 0000000000..70a79c906e
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_FusedBatchNormV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "FusedBatchNormV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_G.pbtxt b/tensorflow/core/api_def/python_api/api_def_G.pbtxt
deleted file mode 100644
index 8235d245fe..0000000000
--- a/tensorflow/core/api_def/python_api/api_def_G.pbtxt
+++ /dev/null
@@ -1,16 +0,0 @@
-op {
- graph_op_name: "GenerateVocabRemapping"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "GetSessionHandle"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "GetSessionHandleV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "GetSessionTensor"
- visibility: HIDDEN
-}
diff --git a/tensorflow/core/api_def/python_api/api_def_GenerateVocabRemapping.pbtxt b/tensorflow/core/api_def/python_api/api_def_GenerateVocabRemapping.pbtxt
new file mode 100644
index 0000000000..35f0993851
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_GenerateVocabRemapping.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "GenerateVocabRemapping"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_GetSessionHandle.pbtxt b/tensorflow/core/api_def/python_api/api_def_GetSessionHandle.pbtxt
new file mode 100644
index 0000000000..18396a1277
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_GetSessionHandle.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "GetSessionHandle"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_GetSessionHandleV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_GetSessionHandleV2.pbtxt
new file mode 100644
index 0000000000..39cbcca122
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_GetSessionHandleV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "GetSessionHandleV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_GetSessionTensor.pbtxt b/tensorflow/core/api_def/python_api/api_def_GetSessionTensor.pbtxt
new file mode 100644
index 0000000000..2ef75ed34d
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_GetSessionTensor.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "GetSessionTensor"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_H.pbtxt b/tensorflow/core/api_def/python_api/api_def_H.pbtxt
deleted file mode 100644
index 9f3fe2eb08..0000000000
--- a/tensorflow/core/api_def/python_api/api_def_H.pbtxt
+++ /dev/null
@@ -1,18 +0,0 @@
-op {
- graph_op_name: "HSVToRGB"
- endpoint {
- name: "image.hsv_to_rgb"
- }
-}
-op {
- graph_op_name: "HashTable"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "HashTableV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "HistogramSummary"
- visibility: HIDDEN
-}
diff --git a/tensorflow/core/api_def/python_api/api_def_HSVToRGB.pbtxt b/tensorflow/core/api_def/python_api/api_def_HSVToRGB.pbtxt
new file mode 100644
index 0000000000..55998189f4
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_HSVToRGB.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "HSVToRGB"
+ endpoint {
+ name: "image.hsv_to_rgb"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_HashTable.pbtxt b/tensorflow/core/api_def/python_api/api_def_HashTable.pbtxt
new file mode 100644
index 0000000000..d1c5b2a4dc
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_HashTable.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "HashTable"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_HashTableV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_HashTableV2.pbtxt
new file mode 100644
index 0000000000..d702d4d0de
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_HashTableV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "HashTableV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_HistogramFixedWidth.pbtxt b/tensorflow/core/api_def/python_api/api_def_HistogramFixedWidth.pbtxt
new file mode 100644
index 0000000000..6cb4d9192e
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_HistogramFixedWidth.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "HistogramFixedWidth"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_HistogramSummary.pbtxt b/tensorflow/core/api_def/python_api/api_def_HistogramSummary.pbtxt
new file mode 100644
index 0000000000..644807d16f
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_HistogramSummary.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "HistogramSummary"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_I.pbtxt b/tensorflow/core/api_def/python_api/api_def_I.pbtxt
deleted file mode 100644
index db6a54dbd4..0000000000
--- a/tensorflow/core/api_def/python_api/api_def_I.pbtxt
+++ /dev/null
@@ -1,55 +0,0 @@
-op {
- graph_op_name: "IFFT"
- endpoint {
- name: "ifft"
- }
- endpoint {
- name: "spectral.ifft"
- }
-}
-op {
- graph_op_name: "IdentityReader"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "IdentityReaderV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "ImageSummary"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "InTopK"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "InTopKV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "InitializeTable"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "InitializeTableFromTextFile"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "InitializeTableFromTextFileV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "InitializeTableV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "InvGrad"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "Invert"
- endpoint {
- name: "bitwise.invert"
- }
-}
diff --git a/tensorflow/core/api_def/python_api/api_def_IFFT.pbtxt b/tensorflow/core/api_def/python_api/api_def_IFFT.pbtxt
new file mode 100644
index 0000000000..6bbc4ed720
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_IFFT.pbtxt
@@ -0,0 +1,9 @@
+op {
+ graph_op_name: "IFFT"
+ endpoint {
+ name: "ifft"
+ }
+ endpoint {
+ name: "spectral.ifft"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_IdentityReader.pbtxt b/tensorflow/core/api_def/python_api/api_def_IdentityReader.pbtxt
new file mode 100644
index 0000000000..0a337f1520
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_IdentityReader.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "IdentityReader"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_IdentityReaderV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_IdentityReaderV2.pbtxt
new file mode 100644
index 0000000000..efafd76c71
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_IdentityReaderV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "IdentityReaderV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_ImageSummary.pbtxt b/tensorflow/core/api_def/python_api/api_def_ImageSummary.pbtxt
new file mode 100644
index 0000000000..521c885023
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_ImageSummary.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "ImageSummary"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_InTopK.pbtxt b/tensorflow/core/api_def/python_api/api_def_InTopK.pbtxt
new file mode 100644
index 0000000000..357b9df14b
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_InTopK.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "InTopK"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_InTopKV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_InTopKV2.pbtxt
new file mode 100644
index 0000000000..a0a1c9e831
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_InTopKV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "InTopKV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_InitializeTable.pbtxt b/tensorflow/core/api_def/python_api/api_def_InitializeTable.pbtxt
new file mode 100644
index 0000000000..068030c755
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_InitializeTable.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "InitializeTable"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_InitializeTableFromTextFile.pbtxt b/tensorflow/core/api_def/python_api/api_def_InitializeTableFromTextFile.pbtxt
new file mode 100644
index 0000000000..dd0e586976
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_InitializeTableFromTextFile.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "InitializeTableFromTextFile"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_InitializeTableFromTextFileV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_InitializeTableFromTextFileV2.pbtxt
new file mode 100644
index 0000000000..659642056d
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_InitializeTableFromTextFileV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "InitializeTableFromTextFileV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_InitializeTableV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_InitializeTableV2.pbtxt
new file mode 100644
index 0000000000..ee73655258
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_InitializeTableV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "InitializeTableV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Invert.pbtxt b/tensorflow/core/api_def/python_api/api_def_Invert.pbtxt
new file mode 100644
index 0000000000..a41d05a3c9
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Invert.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "Invert"
+ endpoint {
+ name: "bitwise.invert"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_L.pbtxt b/tensorflow/core/api_def/python_api/api_def_L.pbtxt
deleted file mode 100644
index 083fbdae6f..0000000000
--- a/tensorflow/core/api_def/python_api/api_def_L.pbtxt
+++ /dev/null
@@ -1,96 +0,0 @@
-op {
- graph_op_name: "L2Loss"
- endpoint {
- name: "nn.l2_loss"
- }
-}
-op {
- graph_op_name: "LMDBReader"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "LRN"
- endpoint {
- name: "nn.local_response_normalization"
- }
- endpoint {
- name: "nn.lrn"
- }
-}
-op {
- graph_op_name: "LRNGrad"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "LearnedUnigramCandidateSampler"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "LinSpace"
- endpoint {
- name: "lin_space"
- }
- endpoint {
- name: "linspace"
- }
-}
-op {
- graph_op_name: "ListDiff"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "LoadAndRemapMatrix"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "LogMatrixDeterminant"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "LogSoftmax"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "LogUniformCandidateSampler"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "LookupTableExport"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "LookupTableExportV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "LookupTableFind"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "LookupTableFindV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "LookupTableImport"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "LookupTableImportV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "LookupTableInsert"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "LookupTableInsertV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "LookupTableSize"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "LookupTableSizeV2"
- visibility: HIDDEN
-}
diff --git a/tensorflow/core/api_def/python_api/api_def_L2Loss.pbtxt b/tensorflow/core/api_def/python_api/api_def_L2Loss.pbtxt
new file mode 100644
index 0000000000..de994e7f0a
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_L2Loss.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "L2Loss"
+ endpoint {
+ name: "nn.l2_loss"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_LMDBReader.pbtxt b/tensorflow/core/api_def/python_api/api_def_LMDBReader.pbtxt
new file mode 100644
index 0000000000..63e261f6de
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_LMDBReader.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "LMDBReader"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_LRN.pbtxt b/tensorflow/core/api_def/python_api/api_def_LRN.pbtxt
new file mode 100644
index 0000000000..b6567fe33e
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_LRN.pbtxt
@@ -0,0 +1,9 @@
+op {
+ graph_op_name: "LRN"
+ endpoint {
+ name: "nn.local_response_normalization"
+ }
+ endpoint {
+ name: "nn.lrn"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_LearnedUnigramCandidateSampler.pbtxt b/tensorflow/core/api_def/python_api/api_def_LearnedUnigramCandidateSampler.pbtxt
new file mode 100644
index 0000000000..b005fe81c8
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_LearnedUnigramCandidateSampler.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "LearnedUnigramCandidateSampler"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_LeftShift.pbtxt b/tensorflow/core/api_def/python_api/api_def_LeftShift.pbtxt
new file mode 100644
index 0000000000..bf703f3897
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_LeftShift.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "LeftShift"
+ endpoint {
+ name: "bitwise.left_shift"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_LinSpace.pbtxt b/tensorflow/core/api_def/python_api/api_def_LinSpace.pbtxt
new file mode 100644
index 0000000000..b1de2cb207
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_LinSpace.pbtxt
@@ -0,0 +1,9 @@
+op {
+ graph_op_name: "LinSpace"
+ endpoint {
+ name: "lin_space"
+ }
+ endpoint {
+ name: "linspace"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_ListDiff.pbtxt b/tensorflow/core/api_def/python_api/api_def_ListDiff.pbtxt
new file mode 100644
index 0000000000..6718d5bec1
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_ListDiff.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "ListDiff"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_LoadAndRemapMatrix.pbtxt b/tensorflow/core/api_def/python_api/api_def_LoadAndRemapMatrix.pbtxt
new file mode 100644
index 0000000000..ac0f612443
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_LoadAndRemapMatrix.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "LoadAndRemapMatrix"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_LogMatrixDeterminant.pbtxt b/tensorflow/core/api_def/python_api/api_def_LogMatrixDeterminant.pbtxt
new file mode 100644
index 0000000000..36d1eadab4
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_LogMatrixDeterminant.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "LogMatrixDeterminant"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_LogSoftmax.pbtxt b/tensorflow/core/api_def/python_api/api_def_LogSoftmax.pbtxt
new file mode 100644
index 0000000000..6fde770eec
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_LogSoftmax.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "LogSoftmax"
+ visibility: HIDDEN
+}
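`visibility: HIDDEN` keeps an op out of the generated public namespace without removing its kernel: the entry above produces no auto-generated tf.log_softmax, and the public tf.nn.log_softmax is instead a Python-level wrapper that dispatches to the hidden op. A sketch of the visible path, same TF 1.x assumption:

import tensorflow as tf

logits = tf.constant([[1.0, 2.0, 3.0]])
# Public wrapper; internally it calls the hidden LogSoftmax op.
out = tf.nn.log_softmax(logits)

with tf.Session() as sess:
    print(sess.run(out))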
diff --git a/tensorflow/core/api_def/python_api/api_def_LogUniformCandidateSampler.pbtxt b/tensorflow/core/api_def/python_api/api_def_LogUniformCandidateSampler.pbtxt
new file mode 100644
index 0000000000..276f1f576b
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_LogUniformCandidateSampler.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "LogUniformCandidateSampler"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_LookupTableExport.pbtxt b/tensorflow/core/api_def/python_api/api_def_LookupTableExport.pbtxt
new file mode 100644
index 0000000000..016ad8dc60
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_LookupTableExport.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "LookupTableExport"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_LookupTableExportV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_LookupTableExportV2.pbtxt
new file mode 100644
index 0000000000..37e9746ccc
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_LookupTableExportV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "LookupTableExportV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_LookupTableFind.pbtxt b/tensorflow/core/api_def/python_api/api_def_LookupTableFind.pbtxt
new file mode 100644
index 0000000000..739196deb9
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_LookupTableFind.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "LookupTableFind"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_LookupTableFindV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_LookupTableFindV2.pbtxt
new file mode 100644
index 0000000000..da3be6db42
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_LookupTableFindV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "LookupTableFindV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_LookupTableImport.pbtxt b/tensorflow/core/api_def/python_api/api_def_LookupTableImport.pbtxt
new file mode 100644
index 0000000000..52634b6fb0
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_LookupTableImport.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "LookupTableImport"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_LookupTableImportV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_LookupTableImportV2.pbtxt
new file mode 100644
index 0000000000..75a4e00473
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_LookupTableImportV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "LookupTableImportV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_LookupTableInsert.pbtxt b/tensorflow/core/api_def/python_api/api_def_LookupTableInsert.pbtxt
new file mode 100644
index 0000000000..72dcc5fe6b
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_LookupTableInsert.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "LookupTableInsert"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_LookupTableInsertV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_LookupTableInsertV2.pbtxt
new file mode 100644
index 0000000000..14ca6f80a5
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_LookupTableInsertV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "LookupTableInsertV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_LookupTableSize.pbtxt b/tensorflow/core/api_def/python_api/api_def_LookupTableSize.pbtxt
new file mode 100644
index 0000000000..203b51aee4
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_LookupTableSize.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "LookupTableSize"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_LookupTableSizeV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_LookupTableSizeV2.pbtxt
new file mode 100644
index 0000000000..ba26ba0724
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_LookupTableSizeV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "LookupTableSizeV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_M.pbtxt b/tensorflow/core/api_def/python_api/api_def_M.pbtxt
deleted file mode 100644
index c8840e0c09..0000000000
--- a/tensorflow/core/api_def/python_api/api_def_M.pbtxt
+++ /dev/null
@@ -1,174 +0,0 @@
-op {
- graph_op_name: "MatMul"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "MatrixBandPart"
- endpoint {
- name: "linalg.band_part"
- }
- endpoint {
- name: "matrix_band_part"
- }
-}
-op {
- graph_op_name: "MatrixDeterminant"
- endpoint {
- name: "linalg.det"
- }
- endpoint {
- name: "matrix_determinant"
- }
-}
-op {
- graph_op_name: "MatrixDiag"
- endpoint {
- name: "linalg.diag"
- }
- endpoint {
- name: "matrix_diag"
- }
-}
-op {
- graph_op_name: "MatrixDiagPart"
- endpoint {
- name: "linalg.diag_part"
- }
- endpoint {
- name: "matrix_diag_part"
- }
-}
-op {
- graph_op_name: "MatrixInverse"
- endpoint {
- name: "linalg.inv"
- }
- endpoint {
- name: "matrix_inverse"
- }
-}
-op {
- graph_op_name: "MatrixSetDiag"
- endpoint {
- name: "linalg.set_diag"
- }
- endpoint {
- name: "matrix_set_diag"
- }
-}
-op {
- graph_op_name: "MatrixSolve"
- endpoint {
- name: "linalg.solve"
- }
- endpoint {
- name: "matrix_solve"
- }
-}
-op {
- graph_op_name: "MatrixSolveLs"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "MatrixTriangularSolve"
- endpoint {
- name: "linalg.triangular_solve"
- }
- endpoint {
- name: "matrix_triangular_solve"
- }
-}
-op {
- graph_op_name: "Max"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "MaxPool"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "MaxPool3DGrad"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "MaxPool3DGradGrad"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "MaxPoolGrad"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "MaxPoolGradGrad"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "MaxPoolGradGradWithArgmax"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "MaxPoolGradWithArgmax"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "MaxPoolV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "MaxPoolWithArgmax"
- endpoint {
- name: "nn.max_pool_with_argmax"
- }
-}
-op {
- graph_op_name: "Mean"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "Merge"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "MergeSummary"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "Min"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "MirrorPad"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "MirrorPadGrad"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "Mul"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "MutableDenseHashTable"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "MutableDenseHashTableV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "MutableHashTable"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "MutableHashTableOfTensors"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "MutableHashTableOfTensorsV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "MutableHashTableV2"
- visibility: HIDDEN
-}
diff --git a/tensorflow/core/api_def/python_api/api_def_MatMul.pbtxt b/tensorflow/core/api_def/python_api/api_def_MatMul.pbtxt
new file mode 100644
index 0000000000..ce95f857be
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_MatMul.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "MatMul"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_MatrixBandPart.pbtxt b/tensorflow/core/api_def/python_api/api_def_MatrixBandPart.pbtxt
new file mode 100644
index 0000000000..89b1c1f5a9
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_MatrixBandPart.pbtxt
@@ -0,0 +1,9 @@
+op {
+ graph_op_name: "MatrixBandPart"
+ endpoint {
+ name: "linalg.band_part"
+ }
+ endpoint {
+ name: "matrix_band_part"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_MatrixDeterminant.pbtxt b/tensorflow/core/api_def/python_api/api_def_MatrixDeterminant.pbtxt
new file mode 100644
index 0000000000..4d289f542f
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_MatrixDeterminant.pbtxt
@@ -0,0 +1,9 @@
+op {
+ graph_op_name: "MatrixDeterminant"
+ endpoint {
+ name: "linalg.det"
+ }
+ endpoint {
+ name: "matrix_determinant"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_MatrixDiag.pbtxt b/tensorflow/core/api_def/python_api/api_def_MatrixDiag.pbtxt
new file mode 100644
index 0000000000..fd9d34635e
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_MatrixDiag.pbtxt
@@ -0,0 +1,9 @@
+op {
+ graph_op_name: "MatrixDiag"
+ endpoint {
+ name: "linalg.diag"
+ }
+ endpoint {
+ name: "matrix_diag"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_MatrixDiagPart.pbtxt b/tensorflow/core/api_def/python_api/api_def_MatrixDiagPart.pbtxt
new file mode 100644
index 0000000000..fa5d1f10af
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_MatrixDiagPart.pbtxt
@@ -0,0 +1,9 @@
+op {
+ graph_op_name: "MatrixDiagPart"
+ endpoint {
+ name: "linalg.diag_part"
+ }
+ endpoint {
+ name: "matrix_diag_part"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_MatrixInverse.pbtxt b/tensorflow/core/api_def/python_api/api_def_MatrixInverse.pbtxt
new file mode 100644
index 0000000000..c0ddd73704
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_MatrixInverse.pbtxt
@@ -0,0 +1,9 @@
+op {
+ graph_op_name: "MatrixInverse"
+ endpoint {
+ name: "linalg.inv"
+ }
+ endpoint {
+ name: "matrix_inverse"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_MatrixSetDiag.pbtxt b/tensorflow/core/api_def/python_api/api_def_MatrixSetDiag.pbtxt
new file mode 100644
index 0000000000..01f4f0e89d
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_MatrixSetDiag.pbtxt
@@ -0,0 +1,9 @@
+op {
+ graph_op_name: "MatrixSetDiag"
+ endpoint {
+ name: "linalg.set_diag"
+ }
+ endpoint {
+ name: "matrix_set_diag"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_MatrixSolve.pbtxt b/tensorflow/core/api_def/python_api/api_def_MatrixSolve.pbtxt
new file mode 100644
index 0000000000..cef763e4e9
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_MatrixSolve.pbtxt
@@ -0,0 +1,9 @@
+op {
+ graph_op_name: "MatrixSolve"
+ endpoint {
+ name: "linalg.solve"
+ }
+ endpoint {
+ name: "matrix_solve"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_MatrixSolveLs.pbtxt b/tensorflow/core/api_def/python_api/api_def_MatrixSolveLs.pbtxt
new file mode 100644
index 0000000000..f981161d49
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_MatrixSolveLs.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "MatrixSolveLs"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_MatrixTriangularSolve.pbtxt b/tensorflow/core/api_def/python_api/api_def_MatrixTriangularSolve.pbtxt
new file mode 100644
index 0000000000..a0d576aa31
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_MatrixTriangularSolve.pbtxt
@@ -0,0 +1,9 @@
+op {
+ graph_op_name: "MatrixTriangularSolve"
+ endpoint {
+ name: "linalg.triangular_solve"
+ }
+ endpoint {
+ name: "matrix_triangular_solve"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Max.pbtxt b/tensorflow/core/api_def/python_api/api_def_Max.pbtxt
new file mode 100644
index 0000000000..bc369ea618
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Max.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Max"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_MaxPool.pbtxt b/tensorflow/core/api_def/python_api/api_def_MaxPool.pbtxt
new file mode 100644
index 0000000000..e9712911c3
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_MaxPool.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "MaxPool"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_MaxPool3DGrad.pbtxt b/tensorflow/core/api_def/python_api/api_def_MaxPool3DGrad.pbtxt
new file mode 100644
index 0000000000..315c5dfa82
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_MaxPool3DGrad.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "MaxPool3DGrad"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_MaxPool3DGradGrad.pbtxt b/tensorflow/core/api_def/python_api/api_def_MaxPool3DGradGrad.pbtxt
new file mode 100644
index 0000000000..81f06ce4fb
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_MaxPool3DGradGrad.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "MaxPool3DGradGrad"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_MaxPoolGradGrad.pbtxt b/tensorflow/core/api_def/python_api/api_def_MaxPoolGradGrad.pbtxt
new file mode 100644
index 0000000000..8ec9357bd2
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_MaxPoolGradGrad.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "MaxPoolGradGrad"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_MaxPoolGradGradWithArgmax.pbtxt b/tensorflow/core/api_def/python_api/api_def_MaxPoolGradGradWithArgmax.pbtxt
new file mode 100644
index 0000000000..25ec5a4121
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_MaxPoolGradGradWithArgmax.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "MaxPoolGradGradWithArgmax"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_MaxPoolV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_MaxPoolV2.pbtxt
new file mode 100644
index 0000000000..0999b80d7b
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_MaxPoolV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "MaxPoolV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_MaxPoolWithArgmax.pbtxt b/tensorflow/core/api_def/python_api/api_def_MaxPoolWithArgmax.pbtxt
new file mode 100644
index 0000000000..7d8abca5f1
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_MaxPoolWithArgmax.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "MaxPoolWithArgmax"
+ endpoint {
+ name: "nn.max_pool_with_argmax"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Mean.pbtxt b/tensorflow/core/api_def/python_api/api_def_Mean.pbtxt
new file mode 100644
index 0000000000..b4aa5d8582
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Mean.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Mean"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Merge.pbtxt b/tensorflow/core/api_def/python_api/api_def_Merge.pbtxt
new file mode 100644
index 0000000000..059c3d127a
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Merge.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Merge"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_MergeSummary.pbtxt b/tensorflow/core/api_def/python_api/api_def_MergeSummary.pbtxt
new file mode 100644
index 0000000000..72095c5f91
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_MergeSummary.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "MergeSummary"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Min.pbtxt b/tensorflow/core/api_def/python_api/api_def_Min.pbtxt
new file mode 100644
index 0000000000..8172b4f6df
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Min.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Min"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_MirrorPad.pbtxt b/tensorflow/core/api_def/python_api/api_def_MirrorPad.pbtxt
new file mode 100644
index 0000000000..67aebb8e86
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_MirrorPad.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "MirrorPad"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Mul.pbtxt b/tensorflow/core/api_def/python_api/api_def_Mul.pbtxt
new file mode 100644
index 0000000000..fd08acd752
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Mul.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Mul"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_MutableDenseHashTable.pbtxt b/tensorflow/core/api_def/python_api/api_def_MutableDenseHashTable.pbtxt
new file mode 100644
index 0000000000..0c3921654e
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_MutableDenseHashTable.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "MutableDenseHashTable"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_MutableDenseHashTableV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_MutableDenseHashTableV2.pbtxt
new file mode 100644
index 0000000000..3aa6f69096
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_MutableDenseHashTableV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "MutableDenseHashTableV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_MutableHashTable.pbtxt b/tensorflow/core/api_def/python_api/api_def_MutableHashTable.pbtxt
new file mode 100644
index 0000000000..d4566b96bd
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_MutableHashTable.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "MutableHashTable"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_MutableHashTableOfTensors.pbtxt b/tensorflow/core/api_def/python_api/api_def_MutableHashTableOfTensors.pbtxt
new file mode 100644
index 0000000000..aad491fd45
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_MutableHashTableOfTensors.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "MutableHashTableOfTensors"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_MutableHashTableOfTensorsV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_MutableHashTableOfTensorsV2.pbtxt
new file mode 100644
index 0000000000..133ae60428
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_MutableHashTableOfTensorsV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "MutableHashTableOfTensorsV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_MutableHashTableV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_MutableHashTableV2.pbtxt
new file mode 100644
index 0000000000..1f7be9df10
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_MutableHashTableV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "MutableHashTableV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_N.pbtxt b/tensorflow/core/api_def/python_api/api_def_N.pbtxt
deleted file mode 100644
index 60da4dcafe..0000000000
--- a/tensorflow/core/api_def/python_api/api_def_N.pbtxt
+++ /dev/null
@@ -1,16 +0,0 @@
-op {
- graph_op_name: "Neg"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "NegTrain"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "NonMaxSuppression"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "NonMaxSuppressionV2"
- visibility: HIDDEN
-}
diff --git a/tensorflow/core/api_def/python_api/api_def_Neg.pbtxt b/tensorflow/core/api_def/python_api/api_def_Neg.pbtxt
new file mode 100644
index 0000000000..0e2bb9b950
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Neg.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Neg"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_NegTrain.pbtxt b/tensorflow/core/api_def/python_api/api_def_NegTrain.pbtxt
new file mode 100644
index 0000000000..0d536b4eaa
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_NegTrain.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "NegTrain"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_NonMaxSuppression.pbtxt b/tensorflow/core/api_def/python_api/api_def_NonMaxSuppression.pbtxt
new file mode 100644
index 0000000000..cdd122dc2b
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_NonMaxSuppression.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "NonMaxSuppression"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_NonMaxSuppressionV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_NonMaxSuppressionV2.pbtxt
new file mode 100644
index 0000000000..ddbf2ec74e
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_NonMaxSuppressionV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "NonMaxSuppressionV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_O.pbtxt b/tensorflow/core/api_def/python_api/api_def_OneHot.pbtxt
index 3a9f0f4032..3a9f0f4032 100644
--- a/tensorflow/core/api_def/python_api/api_def_O.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_OneHot.pbtxt
diff --git a/tensorflow/core/api_def/python_api/api_def_P.pbtxt b/tensorflow/core/api_def/python_api/api_def_P.pbtxt
deleted file mode 100644
index 87ca53e0b9..0000000000
--- a/tensorflow/core/api_def/python_api/api_def_P.pbtxt
+++ /dev/null
@@ -1,68 +0,0 @@
-op {
- graph_op_name: "Pack"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "Pad"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "PadV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "PaddingFIFOQueue"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "PaddingFIFOQueueV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "ParallelConcat"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "ParameterizedTruncatedNormal"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "ParseExample"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "ParseSingleSequenceExample"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "Placeholder"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "Pow"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "Print"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "PriorityQueue"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "PriorityQueueV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "Prod"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "PyFunc"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "PyFuncStateless"
- visibility: HIDDEN
-}
diff --git a/tensorflow/core/api_def/python_api/api_def_Pack.pbtxt b/tensorflow/core/api_def/python_api/api_def_Pack.pbtxt
new file mode 100644
index 0000000000..cf7929e49a
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Pack.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Pack"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Pad.pbtxt b/tensorflow/core/api_def/python_api/api_def_Pad.pbtxt
new file mode 100644
index 0000000000..f9c04ee59b
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Pad.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Pad"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_PadV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_PadV2.pbtxt
new file mode 100644
index 0000000000..e580992fb2
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_PadV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "PadV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_PaddingFIFOQueue.pbtxt b/tensorflow/core/api_def/python_api/api_def_PaddingFIFOQueue.pbtxt
new file mode 100644
index 0000000000..575392b8b2
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_PaddingFIFOQueue.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "PaddingFIFOQueue"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_PaddingFIFOQueueV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_PaddingFIFOQueueV2.pbtxt
new file mode 100644
index 0000000000..b37b4162c8
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_PaddingFIFOQueueV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "PaddingFIFOQueueV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_ParallelConcat.pbtxt b/tensorflow/core/api_def/python_api/api_def_ParallelConcat.pbtxt
new file mode 100644
index 0000000000..8117b085be
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_ParallelConcat.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "ParallelConcat"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_ParameterizedTruncatedNormal.pbtxt b/tensorflow/core/api_def/python_api/api_def_ParameterizedTruncatedNormal.pbtxt
new file mode 100644
index 0000000000..75444351fa
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_ParameterizedTruncatedNormal.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "ParameterizedTruncatedNormal"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_ParseExample.pbtxt b/tensorflow/core/api_def/python_api/api_def_ParseExample.pbtxt
new file mode 100644
index 0000000000..c68a58d311
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_ParseExample.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "ParseExample"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_ParseSingleSequenceExample.pbtxt b/tensorflow/core/api_def/python_api/api_def_ParseSingleSequenceExample.pbtxt
new file mode 100644
index 0000000000..5b47f452dd
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_ParseSingleSequenceExample.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "ParseSingleSequenceExample"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Placeholder.pbtxt b/tensorflow/core/api_def/python_api/api_def_Placeholder.pbtxt
new file mode 100644
index 0000000000..8c70d9cfe0
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Placeholder.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Placeholder"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Pow.pbtxt b/tensorflow/core/api_def/python_api/api_def_Pow.pbtxt
new file mode 100644
index 0000000000..bee695149a
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Pow.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Pow"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Print.pbtxt b/tensorflow/core/api_def/python_api/api_def_Print.pbtxt
new file mode 100644
index 0000000000..7854d4c727
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Print.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Print"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_PriorityQueue.pbtxt b/tensorflow/core/api_def/python_api/api_def_PriorityQueue.pbtxt
new file mode 100644
index 0000000000..96d0e9bedc
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_PriorityQueue.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "PriorityQueue"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_PriorityQueueV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_PriorityQueueV2.pbtxt
new file mode 100644
index 0000000000..f640692ff8
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_PriorityQueueV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "PriorityQueueV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Prod.pbtxt b/tensorflow/core/api_def/python_api/api_def_Prod.pbtxt
new file mode 100644
index 0000000000..9801fc0f05
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Prod.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Prod"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_PyFunc.pbtxt b/tensorflow/core/api_def/python_api/api_def_PyFunc.pbtxt
new file mode 100644
index 0000000000..df9e876f2b
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_PyFunc.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "PyFunc"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_PyFuncStateless.pbtxt b/tensorflow/core/api_def/python_api/api_def_PyFuncStateless.pbtxt
new file mode 100644
index 0000000000..50c8d1a096
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_PyFuncStateless.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "PyFuncStateless"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Q.pbtxt b/tensorflow/core/api_def/python_api/api_def_Q.pbtxt
deleted file mode 100644
index 0dfb5bb703..0000000000
--- a/tensorflow/core/api_def/python_api/api_def_Q.pbtxt
+++ /dev/null
@@ -1,83 +0,0 @@
-op {
- graph_op_name: "Qr"
- endpoint {
- name: "linalg.qr"
- }
- endpoint {
- name: "qr"
- }
-}
-op {
- graph_op_name: "QuantizedAvgPool"
- endpoint {
- name: "nn.quantized_avg_pool"
- }
-}
-op {
- graph_op_name: "QuantizedMaxPool"
- endpoint {
- name: "nn.quantized_max_pool"
- }
-}
-op {
- graph_op_name: "QuantizedReluX"
- endpoint {
- name: "nn.quantized_relu_x"
- }
-}
-op {
- graph_op_name: "QueueClose"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "QueueCloseV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "QueueDequeue"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "QueueDequeueMany"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "QueueDequeueManyV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "QueueDequeueUpTo"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "QueueDequeueUpToV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "QueueDequeueV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "QueueEnqueue"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "QueueEnqueueMany"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "QueueEnqueueManyV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "QueueEnqueueV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "QueueSize"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "QueueSizeV2"
- visibility: HIDDEN
-}
diff --git a/tensorflow/core/api_def/python_api/api_def_Qr.pbtxt b/tensorflow/core/api_def/python_api/api_def_Qr.pbtxt
new file mode 100644
index 0000000000..b19da0d817
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Qr.pbtxt
@@ -0,0 +1,9 @@
+op {
+ graph_op_name: "Qr"
+ endpoint {
+ name: "linalg.qr"
+ }
+ endpoint {
+ name: "qr"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_QuantizedAvgPool.pbtxt b/tensorflow/core/api_def/python_api/api_def_QuantizedAvgPool.pbtxt
new file mode 100644
index 0000000000..dfa793a16e
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_QuantizedAvgPool.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "QuantizedAvgPool"
+ endpoint {
+ name: "nn.quantized_avg_pool"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_QuantizedMaxPool.pbtxt b/tensorflow/core/api_def/python_api/api_def_QuantizedMaxPool.pbtxt
new file mode 100644
index 0000000000..3a58590f57
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_QuantizedMaxPool.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "QuantizedMaxPool"
+ endpoint {
+ name: "nn.quantized_max_pool"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_QuantizedReluX.pbtxt b/tensorflow/core/api_def/python_api/api_def_QuantizedReluX.pbtxt
new file mode 100644
index 0000000000..926ec98eeb
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_QuantizedReluX.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "QuantizedReluX"
+ endpoint {
+ name: "nn.quantized_relu_x"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_QueueClose.pbtxt b/tensorflow/core/api_def/python_api/api_def_QueueClose.pbtxt
new file mode 100644
index 0000000000..7ab6f2f821
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_QueueClose.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "QueueClose"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_QueueCloseV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_QueueCloseV2.pbtxt
new file mode 100644
index 0000000000..0a00c3d78a
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_QueueCloseV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "QueueCloseV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_QueueDequeue.pbtxt b/tensorflow/core/api_def/python_api/api_def_QueueDequeue.pbtxt
new file mode 100644
index 0000000000..ad2e246e92
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_QueueDequeue.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "QueueDequeue"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_QueueDequeueMany.pbtxt b/tensorflow/core/api_def/python_api/api_def_QueueDequeueMany.pbtxt
new file mode 100644
index 0000000000..ff6a6e47a4
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_QueueDequeueMany.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "QueueDequeueMany"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_QueueDequeueManyV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_QueueDequeueManyV2.pbtxt
new file mode 100644
index 0000000000..30ed19a210
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_QueueDequeueManyV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "QueueDequeueManyV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_QueueDequeueUpTo.pbtxt b/tensorflow/core/api_def/python_api/api_def_QueueDequeueUpTo.pbtxt
new file mode 100644
index 0000000000..34b59952a2
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_QueueDequeueUpTo.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "QueueDequeueUpTo"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_QueueDequeueUpToV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_QueueDequeueUpToV2.pbtxt
new file mode 100644
index 0000000000..fd0cd2500d
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_QueueDequeueUpToV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "QueueDequeueUpToV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_QueueDequeueV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_QueueDequeueV2.pbtxt
new file mode 100644
index 0000000000..3dfa758f1e
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_QueueDequeueV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "QueueDequeueV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_QueueEnqueue.pbtxt b/tensorflow/core/api_def/python_api/api_def_QueueEnqueue.pbtxt
new file mode 100644
index 0000000000..0a3698fd30
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_QueueEnqueue.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "QueueEnqueue"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_QueueEnqueueMany.pbtxt b/tensorflow/core/api_def/python_api/api_def_QueueEnqueueMany.pbtxt
new file mode 100644
index 0000000000..a6bab13c9d
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_QueueEnqueueMany.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "QueueEnqueueMany"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_QueueEnqueueManyV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_QueueEnqueueManyV2.pbtxt
new file mode 100644
index 0000000000..a70b2019a5
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_QueueEnqueueManyV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "QueueEnqueueManyV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_QueueEnqueueV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_QueueEnqueueV2.pbtxt
new file mode 100644
index 0000000000..a06d0a3856
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_QueueEnqueueV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "QueueEnqueueV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_QueueSize.pbtxt b/tensorflow/core/api_def/python_api/api_def_QueueSize.pbtxt
new file mode 100644
index 0000000000..25e881d381
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_QueueSize.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "QueueSize"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_QueueSizeV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_QueueSizeV2.pbtxt
new file mode 100644
index 0000000000..b33b4e804a
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_QueueSizeV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "QueueSizeV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_R.pbtxt b/tensorflow/core/api_def/python_api/api_def_R.pbtxt
deleted file mode 100644
index 0c8a8a4d42..0000000000
--- a/tensorflow/core/api_def/python_api/api_def_R.pbtxt
+++ /dev/null
@@ -1,192 +0,0 @@
-op {
- graph_op_name: "RGBToHSV"
- endpoint {
- name: "image.rgb_to_hsv"
- }
-}
-op {
- graph_op_name: "RandomCrop"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "RandomGamma"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "RandomPoisson"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "RandomShuffle"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "RandomShuffleQueue"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "RandomShuffleQueueV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "RandomStandardNormal"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "RandomUniform"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "RandomUniformInt"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "Range"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "ReaderNumRecordsProduced"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "ReaderNumRecordsProducedV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "ReaderNumWorkUnitsCompleted"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "ReaderNumWorkUnitsCompletedV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "ReaderRead"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "ReaderReadUpTo"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "ReaderReadUpToV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "ReaderReadV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "ReaderReset"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "ReaderResetV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "ReaderRestoreState"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "ReaderRestoreStateV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "ReaderSerializeState"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "ReaderSerializeStateV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "RealDiv"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "ReciprocalGrad"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "RefExit"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "RefIdentity"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "RefMerge"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "Relu"
- endpoint {
- name: "nn.relu"
- }
-}
-op {
- graph_op_name: "Relu6"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "Relu6Grad"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "ReluGrad"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "ResizeArea"
- endpoint {
- name: "image.resize_area"
- }
-}
-op {
- graph_op_name: "ResizeBicubic"
- endpoint {
- name: "image.resize_bicubic"
- }
-}
-op {
- graph_op_name: "ResizeBicubicGrad"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "ResizeBilinear"
- endpoint {
- name: "image.resize_bilinear"
- }
-}
-op {
- graph_op_name: "ResizeBilinearGrad"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "ResizeNearestNeighbor"
- endpoint {
- name: "image.resize_nearest_neighbor"
- }
-}
-op {
- graph_op_name: "ResizeNearestNeighborGrad"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "Restore"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "RestoreSlice"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "Reverse"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "RsqrtGrad"
- visibility: HIDDEN
-}
diff --git a/tensorflow/core/api_def/python_api/api_def_RGBToHSV.pbtxt b/tensorflow/core/api_def/python_api/api_def_RGBToHSV.pbtxt
new file mode 100644
index 0000000000..5676391e19
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_RGBToHSV.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "RGBToHSV"
+ endpoint {
+ name: "image.rgb_to_hsv"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_RandomCrop.pbtxt b/tensorflow/core/api_def/python_api/api_def_RandomCrop.pbtxt
new file mode 100644
index 0000000000..f2da1712a2
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_RandomCrop.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "RandomCrop"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_RandomGamma.pbtxt b/tensorflow/core/api_def/python_api/api_def_RandomGamma.pbtxt
new file mode 100644
index 0000000000..23509d8d61
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_RandomGamma.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "RandomGamma"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_RandomPoisson.pbtxt b/tensorflow/core/api_def/python_api/api_def_RandomPoisson.pbtxt
new file mode 100644
index 0000000000..a7da239cb6
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_RandomPoisson.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "RandomPoisson"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_RandomShuffle.pbtxt b/tensorflow/core/api_def/python_api/api_def_RandomShuffle.pbtxt
new file mode 100644
index 0000000000..4e265c8b4e
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_RandomShuffle.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "RandomShuffle"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_RandomShuffleQueue.pbtxt b/tensorflow/core/api_def/python_api/api_def_RandomShuffleQueue.pbtxt
new file mode 100644
index 0000000000..be93d99e84
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_RandomShuffleQueue.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "RandomShuffleQueue"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_RandomShuffleQueueV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_RandomShuffleQueueV2.pbtxt
new file mode 100644
index 0000000000..afef017682
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_RandomShuffleQueueV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "RandomShuffleQueueV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_RandomStandardNormal.pbtxt b/tensorflow/core/api_def/python_api/api_def_RandomStandardNormal.pbtxt
new file mode 100644
index 0000000000..3faff40d8a
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_RandomStandardNormal.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "RandomStandardNormal"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_RandomUniform.pbtxt b/tensorflow/core/api_def/python_api/api_def_RandomUniform.pbtxt
new file mode 100644
index 0000000000..b36975ca76
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_RandomUniform.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "RandomUniform"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_RandomUniformInt.pbtxt b/tensorflow/core/api_def/python_api/api_def_RandomUniformInt.pbtxt
new file mode 100644
index 0000000000..c1da295232
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_RandomUniformInt.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "RandomUniformInt"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Range.pbtxt b/tensorflow/core/api_def/python_api/api_def_Range.pbtxt
new file mode 100644
index 0000000000..48b0e9dda4
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Range.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Range"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_ReaderNumRecordsProduced.pbtxt b/tensorflow/core/api_def/python_api/api_def_ReaderNumRecordsProduced.pbtxt
new file mode 100644
index 0000000000..0849191a0c
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_ReaderNumRecordsProduced.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "ReaderNumRecordsProduced"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_ReaderNumRecordsProducedV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_ReaderNumRecordsProducedV2.pbtxt
new file mode 100644
index 0000000000..ad4acb68bc
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_ReaderNumRecordsProducedV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "ReaderNumRecordsProducedV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_ReaderNumWorkUnitsCompleted.pbtxt b/tensorflow/core/api_def/python_api/api_def_ReaderNumWorkUnitsCompleted.pbtxt
new file mode 100644
index 0000000000..283d3ce1d4
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_ReaderNumWorkUnitsCompleted.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "ReaderNumWorkUnitsCompleted"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_ReaderNumWorkUnitsCompletedV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_ReaderNumWorkUnitsCompletedV2.pbtxt
new file mode 100644
index 0000000000..94a1af4912
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_ReaderNumWorkUnitsCompletedV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "ReaderNumWorkUnitsCompletedV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_ReaderRead.pbtxt b/tensorflow/core/api_def/python_api/api_def_ReaderRead.pbtxt
new file mode 100644
index 0000000000..f0e248dfe4
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_ReaderRead.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "ReaderRead"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_ReaderReadUpTo.pbtxt b/tensorflow/core/api_def/python_api/api_def_ReaderReadUpTo.pbtxt
new file mode 100644
index 0000000000..5e2502b22e
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_ReaderReadUpTo.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "ReaderReadUpTo"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_ReaderReadUpToV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_ReaderReadUpToV2.pbtxt
new file mode 100644
index 0000000000..43b375c69f
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_ReaderReadUpToV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "ReaderReadUpToV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_ReaderReadV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_ReaderReadV2.pbtxt
new file mode 100644
index 0000000000..ef3500df06
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_ReaderReadV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "ReaderReadV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_ReaderReset.pbtxt b/tensorflow/core/api_def/python_api/api_def_ReaderReset.pbtxt
new file mode 100644
index 0000000000..679b1caec0
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_ReaderReset.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "ReaderReset"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_ReaderResetV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_ReaderResetV2.pbtxt
new file mode 100644
index 0000000000..59453c479c
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_ReaderResetV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "ReaderResetV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_ReaderRestoreState.pbtxt b/tensorflow/core/api_def/python_api/api_def_ReaderRestoreState.pbtxt
new file mode 100644
index 0000000000..3075388c62
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_ReaderRestoreState.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "ReaderRestoreState"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_ReaderRestoreStateV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_ReaderRestoreStateV2.pbtxt
new file mode 100644
index 0000000000..0edc9e2f24
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_ReaderRestoreStateV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "ReaderRestoreStateV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_ReaderSerializeState.pbtxt b/tensorflow/core/api_def/python_api/api_def_ReaderSerializeState.pbtxt
new file mode 100644
index 0000000000..b766ce93af
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_ReaderSerializeState.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "ReaderSerializeState"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_ReaderSerializeStateV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_ReaderSerializeStateV2.pbtxt
new file mode 100644
index 0000000000..9e1247eec6
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_ReaderSerializeStateV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "ReaderSerializeStateV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_RealDiv.pbtxt b/tensorflow/core/api_def/python_api/api_def_RealDiv.pbtxt
new file mode 100644
index 0000000000..bd87eef824
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_RealDiv.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "RealDiv"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Relu.pbtxt b/tensorflow/core/api_def/python_api/api_def_Relu.pbtxt
new file mode 100644
index 0000000000..64c61f4ecf
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Relu.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "Relu"
+ endpoint {
+ name: "nn.relu"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Relu6.pbtxt b/tensorflow/core/api_def/python_api/api_def_Relu6.pbtxt
new file mode 100644
index 0000000000..8a132abdf3
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Relu6.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Relu6"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_ResizeArea.pbtxt b/tensorflow/core/api_def/python_api/api_def_ResizeArea.pbtxt
new file mode 100644
index 0000000000..2f1b4aee00
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_ResizeArea.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "ResizeArea"
+ endpoint {
+ name: "image.resize_area"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_ResizeBicubic.pbtxt b/tensorflow/core/api_def/python_api/api_def_ResizeBicubic.pbtxt
new file mode 100644
index 0000000000..3ec8e0ad63
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_ResizeBicubic.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "ResizeBicubic"
+ endpoint {
+ name: "image.resize_bicubic"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_ResizeBilinear.pbtxt b/tensorflow/core/api_def/python_api/api_def_ResizeBilinear.pbtxt
new file mode 100644
index 0000000000..eb3b8d6f45
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_ResizeBilinear.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "ResizeBilinear"
+ endpoint {
+ name: "image.resize_bilinear"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_ResizeNearestNeighbor.pbtxt b/tensorflow/core/api_def/python_api/api_def_ResizeNearestNeighbor.pbtxt
new file mode 100644
index 0000000000..25c5d5701f
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_ResizeNearestNeighbor.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "ResizeNearestNeighbor"
+ endpoint {
+ name: "image.resize_nearest_neighbor"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Restore.pbtxt b/tensorflow/core/api_def/python_api/api_def_Restore.pbtxt
new file mode 100644
index 0000000000..2ec456467d
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Restore.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Restore"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_RestoreSlice.pbtxt b/tensorflow/core/api_def/python_api/api_def_RestoreSlice.pbtxt
new file mode 100644
index 0000000000..f188a291e6
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_RestoreSlice.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "RestoreSlice"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Reverse.pbtxt b/tensorflow/core/api_def/python_api/api_def_Reverse.pbtxt
new file mode 100644
index 0000000000..e2cad1a557
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Reverse.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Reverse"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_ReverseV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_ReverseV2.pbtxt
new file mode 100644
index 0000000000..8307a3c2dd
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_ReverseV2.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "ReverseV2"
+ endpoint {
+ name: "reverse_v2"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_RightShift.pbtxt b/tensorflow/core/api_def/python_api/api_def_RightShift.pbtxt
new file mode 100644
index 0000000000..4d82c3d7e7
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_RightShift.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "RightShift"
+ endpoint {
+ name: "bitwise.right_shift"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_S.pbtxt b/tensorflow/core/api_def/python_api/api_def_S.pbtxt
deleted file mode 100644
index 0c34730200..0000000000
--- a/tensorflow/core/api_def/python_api/api_def_S.pbtxt
+++ /dev/null
@@ -1,252 +0,0 @@
-op {
- graph_op_name: "SampleDistortedBoundingBox"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "SampleDistortedBoundingBoxV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "Save"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "SaveSlices"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "ScalarSummary"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "SdcaFprint"
- endpoint {
- name: "train.sdca_fprint"
- }
-}
-op {
- graph_op_name: "SdcaOptimizer"
- endpoint {
- name: "train.sdca_optimizer"
- }
-}
-op {
- graph_op_name: "SdcaShrinkL1"
- endpoint {
- name: "train.sdca_shrink_l1"
- }
-}
-op {
- graph_op_name: "Select"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "SelfAdjointEig"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "SelfAdjointEigV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "Selu"
- endpoint {
- name: "nn.selu"
- }
-}
-op {
- graph_op_name: "SeluGrad"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "SerializeManySparse"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "SerializeSparse"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "ShardedFilename"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "ShardedFilespec"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "Sigmoid"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "SigmoidGrad"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "Skipgram"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "Slice"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "Softmax"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "SoftmaxCrossEntropyWithLogits"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "Softplus"
- endpoint {
- name: "nn.softplus"
- }
-}
-op {
- graph_op_name: "SoftplusGrad"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "Softsign"
- endpoint {
- name: "nn.softsign"
- }
-}
-op {
- graph_op_name: "SoftsignGrad"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "SpaceToBatch"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "SparseAdd"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "SparseAddGrad"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "SparseConcat"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "SparseCross"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "SparseFillEmptyRows"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "SparseFillEmptyRowsGrad"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "SparseMatMul"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "SparseReorder"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "SparseReshape"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "SparseSoftmaxCrossEntropyWithLogits"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "SparseSplit"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "SparseTensorDenseAdd"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "SparseTensorDenseMatMul"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "SparseToDense"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "Split"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "SplitV"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "SqrtGrad"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "Squeeze"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "Stack"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "StackClose"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "StackCloseV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "StackPop"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "StackPopV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "StackPush"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "StackPushV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "StackV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "StringSplit"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "Sub"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "Sum"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "Svd"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "Switch"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "SymbolicGradient"
- visibility: HIDDEN
-}
diff --git a/tensorflow/core/api_def/python_api/api_def_SampleDistortedBoundingBox.pbtxt b/tensorflow/core/api_def/python_api/api_def_SampleDistortedBoundingBox.pbtxt
new file mode 100644
index 0000000000..20a155bd5f
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_SampleDistortedBoundingBox.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "SampleDistortedBoundingBox"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_SampleDistortedBoundingBoxV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_SampleDistortedBoundingBoxV2.pbtxt
new file mode 100644
index 0000000000..cdd0797f6d
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_SampleDistortedBoundingBoxV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "SampleDistortedBoundingBoxV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Save.pbtxt b/tensorflow/core/api_def/python_api/api_def_Save.pbtxt
new file mode 100644
index 0000000000..e43730f6ff
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Save.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Save"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_SaveSlices.pbtxt b/tensorflow/core/api_def/python_api/api_def_SaveSlices.pbtxt
new file mode 100644
index 0000000000..5861c84b27
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_SaveSlices.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "SaveSlices"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_ScalarSummary.pbtxt b/tensorflow/core/api_def/python_api/api_def_ScalarSummary.pbtxt
new file mode 100644
index 0000000000..f4009af08c
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_ScalarSummary.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "ScalarSummary"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_SdcaFprint.pbtxt b/tensorflow/core/api_def/python_api/api_def_SdcaFprint.pbtxt
new file mode 100644
index 0000000000..60e249077f
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_SdcaFprint.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "SdcaFprint"
+ endpoint {
+ name: "train.sdca_fprint"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_SdcaOptimizer.pbtxt b/tensorflow/core/api_def/python_api/api_def_SdcaOptimizer.pbtxt
new file mode 100644
index 0000000000..5e8e95ee9d
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_SdcaOptimizer.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "SdcaOptimizer"
+ endpoint {
+ name: "train.sdca_optimizer"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_SdcaShrinkL1.pbtxt b/tensorflow/core/api_def/python_api/api_def_SdcaShrinkL1.pbtxt
new file mode 100644
index 0000000000..552a91fb7e
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_SdcaShrinkL1.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "SdcaShrinkL1"
+ endpoint {
+ name: "train.sdca_shrink_l1"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Select.pbtxt b/tensorflow/core/api_def/python_api/api_def_Select.pbtxt
new file mode 100644
index 0000000000..6e0f3dc4a2
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Select.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Select"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_SelfAdjointEig.pbtxt b/tensorflow/core/api_def/python_api/api_def_SelfAdjointEig.pbtxt
new file mode 100644
index 0000000000..febe9f2f5a
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_SelfAdjointEig.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "SelfAdjointEig"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_SelfAdjointEigV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_SelfAdjointEigV2.pbtxt
new file mode 100644
index 0000000000..b9f3274882
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_SelfAdjointEigV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "SelfAdjointEigV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Selu.pbtxt b/tensorflow/core/api_def/python_api/api_def_Selu.pbtxt
new file mode 100644
index 0000000000..da9ad7ce34
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Selu.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "Selu"
+ endpoint {
+ name: "nn.selu"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_SerializeManySparse.pbtxt b/tensorflow/core/api_def/python_api/api_def_SerializeManySparse.pbtxt
new file mode 100644
index 0000000000..b965a3ad43
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_SerializeManySparse.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "SerializeManySparse"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_SerializeSparse.pbtxt b/tensorflow/core/api_def/python_api/api_def_SerializeSparse.pbtxt
new file mode 100644
index 0000000000..fe95f20302
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_SerializeSparse.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "SerializeSparse"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_ShardedFilename.pbtxt b/tensorflow/core/api_def/python_api/api_def_ShardedFilename.pbtxt
new file mode 100644
index 0000000000..67b2ef6bfc
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_ShardedFilename.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "ShardedFilename"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_ShardedFilespec.pbtxt b/tensorflow/core/api_def/python_api/api_def_ShardedFilespec.pbtxt
new file mode 100644
index 0000000000..bb25f103f1
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_ShardedFilespec.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "ShardedFilespec"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Sigmoid.pbtxt b/tensorflow/core/api_def/python_api/api_def_Sigmoid.pbtxt
new file mode 100644
index 0000000000..4a6ffb7198
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Sigmoid.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Sigmoid"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Skipgram.pbtxt b/tensorflow/core/api_def/python_api/api_def_Skipgram.pbtxt
new file mode 100644
index 0000000000..2bc76069f2
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Skipgram.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Skipgram"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Slice.pbtxt b/tensorflow/core/api_def/python_api/api_def_Slice.pbtxt
new file mode 100644
index 0000000000..12e7dcc203
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Slice.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Slice"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Softmax.pbtxt b/tensorflow/core/api_def/python_api/api_def_Softmax.pbtxt
new file mode 100644
index 0000000000..d8605c8ddd
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Softmax.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Softmax"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_SoftmaxCrossEntropyWithLogits.pbtxt b/tensorflow/core/api_def/python_api/api_def_SoftmaxCrossEntropyWithLogits.pbtxt
new file mode 100644
index 0000000000..e30b5a4821
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_SoftmaxCrossEntropyWithLogits.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "SoftmaxCrossEntropyWithLogits"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Softplus.pbtxt b/tensorflow/core/api_def/python_api/api_def_Softplus.pbtxt
new file mode 100644
index 0000000000..2de56c27be
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Softplus.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "Softplus"
+ endpoint {
+ name: "nn.softplus"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Softsign.pbtxt b/tensorflow/core/api_def/python_api/api_def_Softsign.pbtxt
new file mode 100644
index 0000000000..b47412d135
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Softsign.pbtxt
@@ -0,0 +1,6 @@
+op {
+ graph_op_name: "Softsign"
+ endpoint {
+ name: "nn.softsign"
+ }
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_SpaceToBatch.pbtxt b/tensorflow/core/api_def/python_api/api_def_SpaceToBatch.pbtxt
new file mode 100644
index 0000000000..2a26f9a3ec
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_SpaceToBatch.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "SpaceToBatch"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_SparseAdd.pbtxt b/tensorflow/core/api_def/python_api/api_def_SparseAdd.pbtxt
new file mode 100644
index 0000000000..6de974fd27
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_SparseAdd.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "SparseAdd"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_SparseAddGrad.pbtxt b/tensorflow/core/api_def/python_api/api_def_SparseAddGrad.pbtxt
new file mode 100644
index 0000000000..87c306aacc
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_SparseAddGrad.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "SparseAddGrad"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_SparseConcat.pbtxt b/tensorflow/core/api_def/python_api/api_def_SparseConcat.pbtxt
new file mode 100644
index 0000000000..3bae51fe23
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_SparseConcat.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "SparseConcat"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_SparseCross.pbtxt b/tensorflow/core/api_def/python_api/api_def_SparseCross.pbtxt
new file mode 100644
index 0000000000..25506cbb31
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_SparseCross.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "SparseCross"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_SparseFillEmptyRows.pbtxt b/tensorflow/core/api_def/python_api/api_def_SparseFillEmptyRows.pbtxt
new file mode 100644
index 0000000000..242e87af1e
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_SparseFillEmptyRows.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "SparseFillEmptyRows"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_SparseFillEmptyRowsGrad.pbtxt b/tensorflow/core/api_def/python_api/api_def_SparseFillEmptyRowsGrad.pbtxt
new file mode 100644
index 0000000000..1cb69c4804
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_SparseFillEmptyRowsGrad.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "SparseFillEmptyRowsGrad"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_SparseMatMul.pbtxt b/tensorflow/core/api_def/python_api/api_def_SparseMatMul.pbtxt
new file mode 100644
index 0000000000..f0af41dbdb
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_SparseMatMul.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "SparseMatMul"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_SparseReorder.pbtxt b/tensorflow/core/api_def/python_api/api_def_SparseReorder.pbtxt
new file mode 100644
index 0000000000..18be89eff4
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_SparseReorder.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "SparseReorder"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_SparseReshape.pbtxt b/tensorflow/core/api_def/python_api/api_def_SparseReshape.pbtxt
new file mode 100644
index 0000000000..010de3e4ad
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_SparseReshape.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "SparseReshape"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_SparseSoftmaxCrossEntropyWithLogits.pbtxt b/tensorflow/core/api_def/python_api/api_def_SparseSoftmaxCrossEntropyWithLogits.pbtxt
new file mode 100644
index 0000000000..06e461aaa7
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_SparseSoftmaxCrossEntropyWithLogits.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "SparseSoftmaxCrossEntropyWithLogits"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_SparseSplit.pbtxt b/tensorflow/core/api_def/python_api/api_def_SparseSplit.pbtxt
new file mode 100644
index 0000000000..285fb96d45
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_SparseSplit.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "SparseSplit"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_SparseTensorDenseAdd.pbtxt b/tensorflow/core/api_def/python_api/api_def_SparseTensorDenseAdd.pbtxt
new file mode 100644
index 0000000000..9b4b6b9232
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_SparseTensorDenseAdd.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "SparseTensorDenseAdd"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_SparseTensorDenseMatMul.pbtxt b/tensorflow/core/api_def/python_api/api_def_SparseTensorDenseMatMul.pbtxt
new file mode 100644
index 0000000000..07878ed2e8
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_SparseTensorDenseMatMul.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "SparseTensorDenseMatMul"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_SparseToDense.pbtxt b/tensorflow/core/api_def/python_api/api_def_SparseToDense.pbtxt
new file mode 100644
index 0000000000..8f5d6f1d96
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_SparseToDense.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "SparseToDense"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Split.pbtxt b/tensorflow/core/api_def/python_api/api_def_Split.pbtxt
new file mode 100644
index 0000000000..609fd3dc2a
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Split.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Split"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_SplitV.pbtxt b/tensorflow/core/api_def/python_api/api_def_SplitV.pbtxt
new file mode 100644
index 0000000000..0ae6f36d1c
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_SplitV.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "SplitV"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Squeeze.pbtxt b/tensorflow/core/api_def/python_api/api_def_Squeeze.pbtxt
new file mode 100644
index 0000000000..9f5697ca94
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Squeeze.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Squeeze"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Stack.pbtxt b/tensorflow/core/api_def/python_api/api_def_Stack.pbtxt
new file mode 100644
index 0000000000..4ee4f6288b
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Stack.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Stack"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_StackClose.pbtxt b/tensorflow/core/api_def/python_api/api_def_StackClose.pbtxt
new file mode 100644
index 0000000000..086acaa534
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_StackClose.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "StackClose"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_StackCloseV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_StackCloseV2.pbtxt
new file mode 100644
index 0000000000..e450ce0047
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_StackCloseV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "StackCloseV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_StackPop.pbtxt b/tensorflow/core/api_def/python_api/api_def_StackPop.pbtxt
new file mode 100644
index 0000000000..59352ead76
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_StackPop.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "StackPop"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_StackPopV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_StackPopV2.pbtxt
new file mode 100644
index 0000000000..102fdd00b7
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_StackPopV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "StackPopV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_StackPush.pbtxt b/tensorflow/core/api_def/python_api/api_def_StackPush.pbtxt
new file mode 100644
index 0000000000..a83c24909c
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_StackPush.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "StackPush"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_StackPushV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_StackPushV2.pbtxt
new file mode 100644
index 0000000000..fad442ada6
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_StackPushV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "StackPushV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_StackV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_StackV2.pbtxt
new file mode 100644
index 0000000000..31362f46f7
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_StackV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "StackV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_StringSplit.pbtxt b/tensorflow/core/api_def/python_api/api_def_StringSplit.pbtxt
new file mode 100644
index 0000000000..891ff7157a
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_StringSplit.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "StringSplit"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Sub.pbtxt b/tensorflow/core/api_def/python_api/api_def_Sub.pbtxt
new file mode 100644
index 0000000000..747b44d4fe
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Sub.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Sub"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Sum.pbtxt b/tensorflow/core/api_def/python_api/api_def_Sum.pbtxt
new file mode 100644
index 0000000000..68e3472181
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Sum.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Sum"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Svd.pbtxt b/tensorflow/core/api_def/python_api/api_def_Svd.pbtxt
new file mode 100644
index 0000000000..098180f8d9
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Svd.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Svd"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Switch.pbtxt b/tensorflow/core/api_def/python_api/api_def_Switch.pbtxt
new file mode 100644
index 0000000000..2087c860b4
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Switch.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Switch"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_SymbolicGradient.pbtxt b/tensorflow/core/api_def/python_api/api_def_SymbolicGradient.pbtxt
new file mode 100644
index 0000000000..0f747f464b
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_SymbolicGradient.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "SymbolicGradient"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_T.pbtxt b/tensorflow/core/api_def/python_api/api_def_T.pbtxt
deleted file mode 100644
index 8011a11243..0000000000
--- a/tensorflow/core/api_def/python_api/api_def_T.pbtxt
+++ /dev/null
@@ -1,196 +0,0 @@
-op {
- graph_op_name: "TFRecordReader"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TFRecordReaderV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TakeManySparseFromTensorsMap"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "Tanh"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TanhGrad"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TemporaryVariable"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TensorArray"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TensorArrayClose"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TensorArrayCloseV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TensorArrayCloseV3"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TensorArrayConcat"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TensorArrayConcatV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TensorArrayConcatV3"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TensorArrayGather"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TensorArrayGatherV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TensorArrayGatherV3"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TensorArrayGrad"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TensorArrayGradV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TensorArrayGradV3"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TensorArrayPack"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TensorArrayRead"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TensorArrayReadV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TensorArrayReadV3"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TensorArrayScatter"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TensorArrayScatterV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TensorArrayScatterV3"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TensorArraySize"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TensorArraySizeV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TensorArraySizeV3"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TensorArraySplit"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TensorArraySplitV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TensorArraySplitV3"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TensorArrayUnpack"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TensorArrayV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TensorArrayV3"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TensorArrayWrite"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TensorArrayWriteV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TensorArrayWriteV3"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TensorSummary"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TensorSummaryV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TextLineReader"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TextLineReaderV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "ThreadUnsafeUnigramCandidateSampler"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TileGrad"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TopK"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TopKV2"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TruncateDiv"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TruncateMod"
- visibility: HIDDEN
-}
-op {
- graph_op_name: "TruncatedNormal"
- visibility: HIDDEN
-}
diff --git a/tensorflow/core/api_def/python_api/api_def_TFRecordReader.pbtxt b/tensorflow/core/api_def/python_api/api_def_TFRecordReader.pbtxt
new file mode 100644
index 0000000000..1ec8bee340
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TFRecordReader.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TFRecordReader"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TFRecordReaderV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_TFRecordReaderV2.pbtxt
new file mode 100644
index 0000000000..e1cda01a6e
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TFRecordReaderV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TFRecordReaderV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TakeManySparseFromTensorsMap.pbtxt b/tensorflow/core/api_def/python_api/api_def_TakeManySparseFromTensorsMap.pbtxt
new file mode 100644
index 0000000000..842419cc25
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TakeManySparseFromTensorsMap.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TakeManySparseFromTensorsMap"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Tanh.pbtxt b/tensorflow/core/api_def/python_api/api_def_Tanh.pbtxt
new file mode 100644
index 0000000000..c946e0a794
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Tanh.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Tanh"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TemporaryVariable.pbtxt b/tensorflow/core/api_def/python_api/api_def_TemporaryVariable.pbtxt
new file mode 100644
index 0000000000..a9201b4fec
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TemporaryVariable.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TemporaryVariable"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TensorArray.pbtxt b/tensorflow/core/api_def/python_api/api_def_TensorArray.pbtxt
new file mode 100644
index 0000000000..f7288b85d7
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TensorArray.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TensorArray"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TensorArrayClose.pbtxt b/tensorflow/core/api_def/python_api/api_def_TensorArrayClose.pbtxt
new file mode 100644
index 0000000000..73e208459c
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TensorArrayClose.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TensorArrayClose"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TensorArrayCloseV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_TensorArrayCloseV2.pbtxt
new file mode 100644
index 0000000000..6c6955f8c7
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TensorArrayCloseV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TensorArrayCloseV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TensorArrayCloseV3.pbtxt b/tensorflow/core/api_def/python_api/api_def_TensorArrayCloseV3.pbtxt
new file mode 100644
index 0000000000..d95854cefb
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TensorArrayCloseV3.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TensorArrayCloseV3"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TensorArrayConcat.pbtxt b/tensorflow/core/api_def/python_api/api_def_TensorArrayConcat.pbtxt
new file mode 100644
index 0000000000..3695a787b9
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TensorArrayConcat.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TensorArrayConcat"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TensorArrayConcatV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_TensorArrayConcatV2.pbtxt
new file mode 100644
index 0000000000..ac103d3c48
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TensorArrayConcatV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TensorArrayConcatV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TensorArrayConcatV3.pbtxt b/tensorflow/core/api_def/python_api/api_def_TensorArrayConcatV3.pbtxt
new file mode 100644
index 0000000000..54cdd3b949
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TensorArrayConcatV3.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TensorArrayConcatV3"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TensorArrayGather.pbtxt b/tensorflow/core/api_def/python_api/api_def_TensorArrayGather.pbtxt
new file mode 100644
index 0000000000..82a98fe7f5
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TensorArrayGather.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TensorArrayGather"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TensorArrayGatherV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_TensorArrayGatherV2.pbtxt
new file mode 100644
index 0000000000..b7fc8541dd
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TensorArrayGatherV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TensorArrayGatherV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TensorArrayGatherV3.pbtxt b/tensorflow/core/api_def/python_api/api_def_TensorArrayGatherV3.pbtxt
new file mode 100644
index 0000000000..08dcedb8b7
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TensorArrayGatherV3.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TensorArrayGatherV3"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TensorArrayGrad.pbtxt b/tensorflow/core/api_def/python_api/api_def_TensorArrayGrad.pbtxt
new file mode 100644
index 0000000000..04b614d22c
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TensorArrayGrad.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TensorArrayGrad"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TensorArrayGradV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_TensorArrayGradV2.pbtxt
new file mode 100644
index 0000000000..bf204dde36
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TensorArrayGradV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TensorArrayGradV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TensorArrayGradV3.pbtxt b/tensorflow/core/api_def/python_api/api_def_TensorArrayGradV3.pbtxt
new file mode 100644
index 0000000000..75aaf6126e
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TensorArrayGradV3.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TensorArrayGradV3"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TensorArrayPack.pbtxt b/tensorflow/core/api_def/python_api/api_def_TensorArrayPack.pbtxt
new file mode 100644
index 0000000000..f60367c1fc
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TensorArrayPack.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TensorArrayPack"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TensorArrayRead.pbtxt b/tensorflow/core/api_def/python_api/api_def_TensorArrayRead.pbtxt
new file mode 100644
index 0000000000..e6c4ccdcf5
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TensorArrayRead.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TensorArrayRead"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TensorArrayReadV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_TensorArrayReadV2.pbtxt
new file mode 100644
index 0000000000..be19fe86fa
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TensorArrayReadV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TensorArrayReadV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TensorArrayReadV3.pbtxt b/tensorflow/core/api_def/python_api/api_def_TensorArrayReadV3.pbtxt
new file mode 100644
index 0000000000..d024f420bc
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TensorArrayReadV3.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TensorArrayReadV3"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TensorArrayScatter.pbtxt b/tensorflow/core/api_def/python_api/api_def_TensorArrayScatter.pbtxt
new file mode 100644
index 0000000000..8cdbb22af5
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TensorArrayScatter.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TensorArrayScatter"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TensorArrayScatterV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_TensorArrayScatterV2.pbtxt
new file mode 100644
index 0000000000..02e16b1407
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TensorArrayScatterV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TensorArrayScatterV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TensorArrayScatterV3.pbtxt b/tensorflow/core/api_def/python_api/api_def_TensorArrayScatterV3.pbtxt
new file mode 100644
index 0000000000..8d262cc665
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TensorArrayScatterV3.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TensorArrayScatterV3"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TensorArraySize.pbtxt b/tensorflow/core/api_def/python_api/api_def_TensorArraySize.pbtxt
new file mode 100644
index 0000000000..169e495c4f
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TensorArraySize.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TensorArraySize"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TensorArraySizeV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_TensorArraySizeV2.pbtxt
new file mode 100644
index 0000000000..d0dbd0d813
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TensorArraySizeV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TensorArraySizeV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TensorArraySizeV3.pbtxt b/tensorflow/core/api_def/python_api/api_def_TensorArraySizeV3.pbtxt
new file mode 100644
index 0000000000..f4e656f5cc
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TensorArraySizeV3.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TensorArraySizeV3"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TensorArraySplit.pbtxt b/tensorflow/core/api_def/python_api/api_def_TensorArraySplit.pbtxt
new file mode 100644
index 0000000000..1d9c53b2b4
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TensorArraySplit.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TensorArraySplit"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TensorArraySplitV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_TensorArraySplitV2.pbtxt
new file mode 100644
index 0000000000..502c78d83e
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TensorArraySplitV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TensorArraySplitV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TensorArraySplitV3.pbtxt b/tensorflow/core/api_def/python_api/api_def_TensorArraySplitV3.pbtxt
new file mode 100644
index 0000000000..faefa0fac2
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TensorArraySplitV3.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TensorArraySplitV3"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TensorArrayUnpack.pbtxt b/tensorflow/core/api_def/python_api/api_def_TensorArrayUnpack.pbtxt
new file mode 100644
index 0000000000..e5c0a794eb
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TensorArrayUnpack.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TensorArrayUnpack"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TensorArrayV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_TensorArrayV2.pbtxt
new file mode 100644
index 0000000000..6ad5c5f288
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TensorArrayV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TensorArrayV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TensorArrayV3.pbtxt b/tensorflow/core/api_def/python_api/api_def_TensorArrayV3.pbtxt
new file mode 100644
index 0000000000..da69f1513c
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TensorArrayV3.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TensorArrayV3"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TensorArrayWrite.pbtxt b/tensorflow/core/api_def/python_api/api_def_TensorArrayWrite.pbtxt
new file mode 100644
index 0000000000..58d50cb7f2
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TensorArrayWrite.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TensorArrayWrite"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TensorArrayWriteV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_TensorArrayWriteV2.pbtxt
new file mode 100644
index 0000000000..f07bb35017
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TensorArrayWriteV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TensorArrayWriteV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TensorArrayWriteV3.pbtxt b/tensorflow/core/api_def/python_api/api_def_TensorArrayWriteV3.pbtxt
new file mode 100644
index 0000000000..becc140401
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TensorArrayWriteV3.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TensorArrayWriteV3"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TensorSummary.pbtxt b/tensorflow/core/api_def/python_api/api_def_TensorSummary.pbtxt
new file mode 100644
index 0000000000..b5148e5d0c
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TensorSummary.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TensorSummary"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TensorSummaryV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_TensorSummaryV2.pbtxt
new file mode 100644
index 0000000000..6245bdce2b
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TensorSummaryV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TensorSummaryV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TextLineReader.pbtxt b/tensorflow/core/api_def/python_api/api_def_TextLineReader.pbtxt
new file mode 100644
index 0000000000..7fa1f6a441
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TextLineReader.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TextLineReader"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TextLineReaderV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_TextLineReaderV2.pbtxt
new file mode 100644
index 0000000000..cc506b39d6
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TextLineReaderV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TextLineReaderV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_ThreadUnsafeUnigramCandidateSampler.pbtxt b/tensorflow/core/api_def/python_api/api_def_ThreadUnsafeUnigramCandidateSampler.pbtxt
new file mode 100644
index 0000000000..7433d2f967
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_ThreadUnsafeUnigramCandidateSampler.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "ThreadUnsafeUnigramCandidateSampler"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TileGrad.pbtxt b/tensorflow/core/api_def/python_api/api_def_TileGrad.pbtxt
new file mode 100644
index 0000000000..1d2dce067b
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TileGrad.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TileGrad"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TopK.pbtxt b/tensorflow/core/api_def/python_api/api_def_TopK.pbtxt
new file mode 100644
index 0000000000..85ebb650e0
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TopK.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TopK"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TopKV2.pbtxt b/tensorflow/core/api_def/python_api/api_def_TopKV2.pbtxt
new file mode 100644
index 0000000000..671b04819c
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TopKV2.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TopKV2"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TruncateDiv.pbtxt b/tensorflow/core/api_def/python_api/api_def_TruncateDiv.pbtxt
new file mode 100644
index 0000000000..2a547f771c
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TruncateDiv.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TruncateDiv"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TruncateMod.pbtxt b/tensorflow/core/api_def/python_api/api_def_TruncateMod.pbtxt
new file mode 100644
index 0000000000..0731e8810e
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TruncateMod.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TruncateMod"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_TruncatedNormal.pbtxt b/tensorflow/core/api_def/python_api/api_def_TruncatedNormal.pbtxt
new file mode 100644
index 0000000000..6003b2fdca
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_TruncatedNormal.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "TruncatedNormal"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_U.pbtxt b/tensorflow/core/api_def/python_api/api_def_UniformCandidateSampler.pbtxt
index d7c261c63c..6a73062b0a 100644
--- a/tensorflow/core/api_def/python_api/api_def_U.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_UniformCandidateSampler.pbtxt
@@ -2,7 +2,3 @@ op {
graph_op_name: "UniformCandidateSampler"
visibility: HIDDEN
}
-op {
- graph_op_name: "Unpack"
- visibility: HIDDEN
-}
diff --git a/tensorflow/core/api_def/python_api/api_def_Unpack.pbtxt b/tensorflow/core/api_def/python_api/api_def_Unpack.pbtxt
new file mode 100644
index 0000000000..30d7b7f734
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Unpack.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Unpack"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_Variable.pbtxt b/tensorflow/core/api_def/python_api/api_def_Variable.pbtxt
new file mode 100644
index 0000000000..7340d2a5c4
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_Variable.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "Variable"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_V.pbtxt b/tensorflow/core/api_def/python_api/api_def_VariableV2.pbtxt
index 18be21a886..7f63a57755 100644
--- a/tensorflow/core/api_def/python_api/api_def_V.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_VariableV2.pbtxt
@@ -1,8 +1,4 @@
op {
- graph_op_name: "Variable"
- visibility: HIDDEN
-}
-op {
graph_op_name: "VariableV2"
visibility: HIDDEN
}
diff --git a/tensorflow/core/api_def/python_api/api_def_WholeFileReader.pbtxt b/tensorflow/core/api_def/python_api/api_def_WholeFileReader.pbtxt
new file mode 100644
index 0000000000..d1cc7a0028
--- /dev/null
+++ b/tensorflow/core/api_def/python_api/api_def_WholeFileReader.pbtxt
@@ -0,0 +1,4 @@
+op {
+ graph_op_name: "WholeFileReader"
+ visibility: HIDDEN
+}
diff --git a/tensorflow/core/api_def/python_api/api_def_W.pbtxt b/tensorflow/core/api_def/python_api/api_def_WholeFileReaderV2.pbtxt
index cd8861a98f..48e7b1e0ec 100644
--- a/tensorflow/core/api_def/python_api/api_def_W.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_WholeFileReaderV2.pbtxt
@@ -1,8 +1,4 @@
op {
- graph_op_name: "WholeFileReader"
- visibility: HIDDEN
-}
-op {
graph_op_name: "WholeFileReaderV2"
visibility: HIDDEN
}
diff --git a/tensorflow/core/api_def/python_api/api_def_Z.pbtxt b/tensorflow/core/api_def/python_api/api_def_ZerosLike.pbtxt
index 5857b7cf38..5857b7cf38 100644
--- a/tensorflow/core/api_def/python_api/api_def_Z.pbtxt
+++ b/tensorflow/core/api_def/python_api/api_def_ZerosLike.pbtxt
diff --git a/tensorflow/core/common_runtime/gpu/gpu_device.cc b/tensorflow/core/common_runtime/gpu/gpu_device.cc
index 12d44cc6b7..fce8bc61f4 100644
--- a/tensorflow/core/common_runtime/gpu/gpu_device.cc
+++ b/tensorflow/core/common_runtime/gpu/gpu_device.cc
@@ -1,4 +1,4 @@
-/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -652,6 +652,36 @@ Status BaseGPUDeviceFactory::CreateDevices(const SessionOptions& options,
if (static_cast<size_t>(n) > valid_gpu_ids.size()) {
n = valid_gpu_ids.size();
}
+ if (!valid_gpu_ids.empty()) {
+ // Save the original device.
+ int original_device = 0;
+ cudaError_t err = cudaGetDevice(&original_device);
+ if (err != cudaSuccess) {
+ return errors::Internal("cudaGetDevice() failed. Status: ",
+ cudaGetErrorString(err));
+ }
+ // Force the CUDA runtime to initialize implicitly on each valid GPU before
+ // CreateGPUDevice() is called.
+ for (int gpu_id : valid_gpu_ids) {
+ err = cudaSetDevice(gpu_id);
+ if (err != cudaSuccess) {
+ return errors::Internal("cudaSetDevice() on GPU:", gpu_id,
+ " failed. Status: ", cudaGetErrorString(err));
+ }
+ err = cudaFree(nullptr);
+ if (err != cudaSuccess) {
+ return errors::Internal(
+ "CUDA runtime implicit initialization on GPU:", gpu_id,
+ " failed. Status: ", cudaGetErrorString(err));
+ }
+ }
+ // Reset to the original device.
+ err = cudaSetDevice(original_device);
+ if (err != cudaSuccess) {
+ return errors::Internal("cudaSetDevice() on GPU:", original_device,
+ " failed. Status: ", cudaGetErrorString(err));
+ }
+ }
for (int i = 0; i < n; i++) {
BaseGPUDevice* gpu_device;
TF_RETURN_IF_ERROR(CreateGPUDevice(
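
The new block above works around lazy context creation in the CUDA runtime: selecting each valid device and issuing a no-op cudaFree(nullptr) forces initialization up front, before CreateGPUDevice() runs, and the originally current device is restored afterwards. A standalone sketch of the same pattern, assuming only the CUDA runtime API (build with nvcc):

// force_cuda_init.cc -- sketch of the implicit-initialization pattern above.
#include <cstdio>
#include <cuda_runtime.h>

int main() {
  // Save the originally current device so it can be restored at the end.
  int original_device = 0;
  if (cudaGetDevice(&original_device) != cudaSuccess) return 1;

  int device_count = 0;
  if (cudaGetDeviceCount(&device_count) != cudaSuccess) return 1;

  for (int gpu_id = 0; gpu_id < device_count; ++gpu_id) {
    cudaError_t err = cudaSetDevice(gpu_id);
    if (err != cudaSuccess) {
      std::fprintf(stderr, "cudaSetDevice(%d): %s\n", gpu_id,
                   cudaGetErrorString(err));
      return 1;
    }
    // cudaFree(nullptr) is a no-op that nevertheless forces the runtime to
    // create a context on the current device.
    err = cudaFree(nullptr);
    if (err != cudaSuccess) {
      std::fprintf(stderr, "init on GPU %d: %s\n", gpu_id,
                   cudaGetErrorString(err));
      return 1;
    }
  }
  // Reset to the original device, as the change above does.
  return cudaSetDevice(original_device) == cudaSuccess ? 0 : 1;
}
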
diff --git a/tensorflow/core/framework/shape_inference.h b/tensorflow/core/framework/shape_inference.h
index d1b610d682..b68e6100df 100644
--- a/tensorflow/core/framework/shape_inference.h
+++ b/tensorflow/core/framework/shape_inference.h
@@ -71,6 +71,7 @@ class DimensionHandle {
friend class ShapeInferenceTestutil;
friend class ::tensorflow::ShapeRefinerTest;
friend class ShapeManager;
+ friend class ::tensorflow::grappler::GraphProperties;
// Intentionally copyable.
};
diff --git a/tensorflow/core/graph/graph_constructor.cc b/tensorflow/core/graph/graph_constructor.cc
index 8fe4f535fb..753cb260e5 100644
--- a/tensorflow/core/graph/graph_constructor.cc
+++ b/tensorflow/core/graph/graph_constructor.cc
@@ -75,6 +75,7 @@ class GraphConstructor {
prefix(in.prefix.empty() || StringPiece(in.prefix).ends_with("/")
? in.prefix
: in.prefix + "/"),
+ uniquify_names(in.uniquify_names),
input_map(in.input_map),
skip_mapped_nodes(in.skip_mapped_nodes),
control_dependencies(in.control_dependencies),
@@ -86,6 +87,7 @@ class GraphConstructor {
bool expect_device_spec;
string prefix;
+ bool uniquify_names;
std::map<TensorId, TensorId> input_map;
bool skip_mapped_nodes;
std::vector<string> control_dependencies;
@@ -190,6 +192,20 @@ class GraphConstructor {
void AddPrefixToNodeDef(const std::vector<bool>& input_already_exists,
NodeDef* node_def);
+ // Modifies `node_def` if its name isn't unique, or if any of its inputs'
+ // names have been uniquified. This must be called in topological order on all
+ // nodes.
+ void UniquifyNames(const std::vector<bool>& input_already_exists,
+ NodeDef* node_def);
+
+ // Returns true if `name` already exists in `g_` (either as a node name or
+ // prefix).
+ bool NameExists(StringPiece name);
+
+ // Returns a unique version of `original_name`, or `original_name` if it's
+ // already unique in the graph.
+ string FindUniqueName(StringPiece original_name);
+
// From constructor
const Options opts_;
const NodeDefSlice node_defs_;
@@ -224,9 +240,16 @@ class GraphConstructor {
// alternative implementation of std::unordered_map.
std::unordered_map<StringPiece, NodeInfo, StringPiece::Hasher> gdef_nodes_;
- // Mapping from node name to the existing node in g_
+ // Mapping from node name to the existing node in g_.
std::unordered_map<StringPiece, Node*, StringPiece::Hasher> existing_nodes_;
+ // Prefixes already used in the graph.
+ std::unordered_set<StringPiece, StringPiece::Hasher> existing_prefixes_;
+
+ // Imported node names that have been uniquified. The key is the original
+ // name, the value is the new unique name.
+ std::unordered_map<string, string> uniquified_names_;
+
// Index of NodeDefs in node_defs_ with all inputs already converted.
std::vector<int> ready_;
@@ -281,6 +304,7 @@ bool NodeNameInValues(const std::vector<string>& control_dependencies,
Status GraphConstructor::EnsureNoNameCollisions() {
existing_nodes_.reserve(g_->num_nodes());
+ // Populate existing_nodes_ and existing_prefixes_.
for (Node* n : g_->nodes()) {
bool already_exists = !existing_nodes_.insert({n->name(), n}).second;
if (already_exists) {
@@ -296,18 +320,22 @@ Status GraphConstructor::EnsureNoNameCollisions() {
n->name(), "'");
}
}
+ // Add all of the node's prefixes to existing_prefixes_ (if it has any).
+ size_t idx = -1;
+ while ((idx = n->name().find('/', idx + 1)) != string::npos) {
+ StringPiece name(n->name());
+ existing_prefixes_.insert(name.substr(0, idx));
+ }
}
- if (opts_.prefix.empty() && opts_.importing) {
+ if (opts_.prefix.empty() && opts_.importing && !opts_.uniquify_names) {
for (const NodeDef* n : node_defs_) {
const string& name = n->name();
- if (existing_nodes_.find(name) != existing_nodes_.end()) {
- return errors::InvalidArgument("Node '", name,
+ if (NameExists(name)) {
+ return errors::InvalidArgument("Node name '", name,
"' already exists in the Graph");
}
}
} else if (!opts_.prefix.empty()) {
- // Importing nodes with a prefix. No nodes should exist with the same
- // prefix.
StringPiece prefix_no_slash(opts_.prefix);
prefix_no_slash.remove_suffix(1);
if (!IsValidNodeName(prefix_no_slash, false)) {
@@ -315,13 +343,11 @@ Status GraphConstructor::EnsureNoNameCollisions() {
opts_.prefix,
"' would lead to invalid node names");
}
- for (const Node* n : g_->nodes()) {
- if (StringPiece(n->name()).starts_with(opts_.prefix)) {
- return errors::InvalidArgument(
- "Import node name prefix conflicts with names of nodes already in "
- "the Graph, such as '",
- n->name(), "'");
- }
+ if (NameExists(prefix_no_slash)) {
+ return errors::InvalidArgument("Import node name prefix '",
+ prefix_no_slash,
+ "' conflicts with "
+ "name already used in the graph");
}
}
return Status::OK();
@@ -663,19 +689,18 @@ void GraphConstructor::AddControlDependencies(
void GraphConstructor::AddPrefixToNodeDef(
const std::vector<bool>& input_already_exists, NodeDef* node_def) {
- const string& prefix = opts_.prefix;
- if (prefix.empty()) return;
- node_def->set_name(strings::StrCat(prefix, node_def->name()));
+ if (opts_.prefix.empty()) return;
+ node_def->set_name(strings::StrCat(opts_.prefix, node_def->name()));
// Update names of input nodes
for (int i = 0; i < node_def->input_size(); ++i) {
StringPiece input(node_def->input(i));
// Skip remapped inputs (which already exist in g_ and are not being
- // imported)
+ // imported).
if (input_already_exists[i]) continue;
if (input.Consume("^")) {
- node_def->set_input(i, strings::StrCat("^", prefix, input));
+ node_def->set_input(i, strings::StrCat("^", opts_.prefix, input));
} else {
- node_def->set_input(i, strings::StrCat(prefix, input));
+ node_def->set_input(i, strings::StrCat(opts_.prefix, input));
}
}
// Update names of colocation groups
@@ -685,12 +710,62 @@ void GraphConstructor::AddPrefixToNodeDef(
for (int i = 0; i < list->s_size(); ++i) {
StringPiece v(list->s(i));
if (v.Consume(kColocationGroupPrefix)) {
- list->set_s(i, strings::StrCat(kColocationGroupPrefix, prefix, v));
+ list->set_s(i,
+ strings::StrCat(kColocationGroupPrefix, opts_.prefix, v));
}
}
}
}
+void GraphConstructor::UniquifyNames(
+ const std::vector<bool>& input_already_exists, NodeDef* node_def) {
+ if (NameExists(node_def->name())) {
+ string old_name = node_def->name();
+ node_def->set_name(FindUniqueName(node_def->name()));
+ uniquified_names_[old_name] = node_def->name();
+ }
+ for (int i = 0; i < node_def->input_size(); ++i) {
+ // Skip remapped inputs (which already exist in g_ and are not being
+ // imported).
+ if (input_already_exists[i]) continue;
+ TensorId id = ParseTensorName(node_def->input(i));
+ // We require that UniquifyNames() is called on all NodeDefs in topological
+ // order. This guarantees that node_def's inputs will already be uniquified
+ // if necessary.
+ auto iter = uniquified_names_.find(id.first.ToString());
+ if (iter == uniquified_names_.end()) continue;
+ id.first = iter->second;
+ node_def->set_input(i, id.ToString());
+ }
+ // Update names of colocation groups
+ if (node_def->attr().find(kColocationAttrName) != node_def->attr().end()) {
+ auto* list =
+ node_def->mutable_attr()->at(kColocationAttrName).mutable_list();
+ for (int i = 0; i < list->s_size(); ++i) {
+ StringPiece v(list->s(i));
+ if (v.Consume(kColocationGroupPrefix)) {
+ auto iter = uniquified_names_.find(v.ToString());
+ if (iter == uniquified_names_.end()) continue;
+ list->set_s(i, strings::StrCat(kColocationGroupPrefix, iter->second));
+ }
+ }
+ }
+}
+
+bool GraphConstructor::NameExists(StringPiece name) {
+ if (existing_nodes_.find(name) != existing_nodes_.end()) return true;
+ return existing_prefixes_.find(name) != existing_prefixes_.end();
+}
+
+string GraphConstructor::FindUniqueName(StringPiece original_name) {
+ string name = original_name.ToString();
+ int count = 1;
+ while (NameExists(name)) {
+ name = strings::StrCat(original_name, "_", count++);
+ }
+ return name;
+}
+
Status GraphConstructor::IsNodeFullyMapped(const NodeDef& node_def,
bool* is_node_mapped) {
const OpDef* op_def;
@@ -825,7 +900,11 @@ Status GraphConstructor::Convert() {
Node* node;
if (opts_.importing) {
- AddPrefixToNodeDef(input_already_exists, &imported_node_def);
+ if (!opts_.prefix.empty()) {
+ AddPrefixToNodeDef(input_already_exists, &imported_node_def);
+ } else if (opts_.uniquify_names) {
+ UniquifyNames(input_already_exists, &imported_node_def);
+ }
TF_RETURN_IF_ERROR(ModifyNodeDefForImport(&imported_node_def));
}
TF_RETURN_IF_ERROR(MakeNode(*node_def, &node));
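
Taken together, NameExists() and FindUniqueName() implement a simple scheme: a candidate name collides if it matches an existing node name or any '/'-delimited prefix of one, and collisions are resolved by appending "_1", "_2", ... until a free name is found. A self-contained sketch of the scheme against plain std::string sets (the real code keys StringPiece views into the graph):

#include <iostream>
#include <string>
#include <unordered_set>

// Sketch of GraphConstructor::NameExists()/FindUniqueName(), assuming node
// names and prefixes have already been collected into these two sets.
bool NameExists(const std::unordered_set<std::string>& nodes,
                const std::unordered_set<std::string>& prefixes,
                const std::string& name) {
  return nodes.count(name) > 0 || prefixes.count(name) > 0;
}

std::string FindUniqueName(const std::unordered_set<std::string>& nodes,
                           const std::unordered_set<std::string>& prefixes,
                           const std::string& original_name) {
  std::string name = original_name;
  int count = 1;
  while (NameExists(nodes, prefixes, name)) {
    name = original_name + "_" + std::to_string(count++);
  }
  return name;
}

int main() {
  std::unordered_set<std::string> nodes = {"A", "A_1", "outer/inner/abc"};
  // Every '/'-delimited prefix of an existing node also blocks a name.
  std::unordered_set<std::string> prefixes = {"outer", "outer/inner"};
  std::cout << FindUniqueName(nodes, prefixes, "A") << "\n";      // A_2
  std::cout << FindUniqueName(nodes, prefixes, "outer") << "\n";  // outer_1
  std::cout << FindUniqueName(nodes, prefixes, "inner") << "\n";  // inner
}
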
diff --git a/tensorflow/core/graph/graph_constructor.h b/tensorflow/core/graph/graph_constructor.h
index a364478878..416c0ee9ae 100644
--- a/tensorflow/core/graph/graph_constructor.h
+++ b/tensorflow/core/graph/graph_constructor.h
@@ -54,13 +54,20 @@ extern Status ConvertNodeDefsToGraph(const GraphConstructorOptions& opts,
// Options for calling ImportGraphDef().
struct ImportGraphDefOptions {
- ImportGraphDefOptions() : skip_mapped_nodes(false) {}
+ ImportGraphDefOptions() : uniquify_names(false), skip_mapped_nodes(false) {}
// Name prefix to use for nodes imported from the GraphDef. For example, if
// prefix="animals" and GraphDef contains a node "bunny" then the node will be
- // named "animals/bunny" in *g.
+ // named "animals/bunny" in *g. Must not be already used as a node name or
+ // prefix in the graph.
string prefix;
+ // If true, an imported node's name will be modified if a node with the same
+ // name already exists in the graph. If false, conflicting names are treated
+ // as an error. Note that this option has no effect if `prefix` is specified,
+ // since `prefix` guarantees that all node names are unique.
+ bool uniquify_names;
+
// Maps tensors in `gdef` to existing tensors in `g`. Inputs in `gdef`
// corresponding to `input_map` keys will be remapped to the nodes in `g`
// corresponding to the values.
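
For illustration, a minimal sketch of how a caller might exercise the new flag (a hypothetical ImportTwice() helper; the ImportGraphDef() and ShapeRefiner signatures are assumed from the test changes below):

#include "tensorflow/core/common_runtime/shape_refiner.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_constructor.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/public/version.h"

// Sketch: import the same GraphDef into a Graph twice. With
// uniquify_names=true the second import renames colliding nodes ('A' becomes
// 'A_1') instead of failing with "Node name 'A' already exists in the Graph".
tensorflow::Status ImportTwice(const tensorflow::GraphDef& gdef,
                               tensorflow::Graph* g) {
  tensorflow::ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, g->op_registry());
  tensorflow::ImportGraphDefOptions opts;
  opts.uniquify_names = true;
  TF_RETURN_IF_ERROR(tensorflow::ImportGraphDef(opts, gdef, g, &refiner));
  return tensorflow::ImportGraphDef(opts, gdef, g, &refiner);
}
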
diff --git a/tensorflow/core/graph/graph_constructor_test.cc b/tensorflow/core/graph/graph_constructor_test.cc
index 5242c56ce6..cd541c7d86 100644
--- a/tensorflow/core/graph/graph_constructor_test.cc
+++ b/tensorflow/core/graph/graph_constructor_test.cc
@@ -1731,6 +1731,136 @@ TEST_F(GraphConstructorTest, ImportGraphDef_ReturnNodesErrors) {
"currently supported"});
}
+TEST_F(GraphConstructorTest, ImportGraphDef_UniquifyNames) {
+ ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry());
+
+ const char* graph_def_str =
+ "node { name: 'A' op: 'TestInput' }"
+ "node { name: 'B' op: 'TestOneInputTwoOutputs' input: ['A'] }";
+
+ // Initial import
+ ImportGraphDefOptions opts;
+ opts.uniquify_names = true;
+ opts.return_nodes.push_back("A");
+ opts.return_nodes.push_back("B");
+ ImportGraphDefResults results;
+ ExpectOK(graph_def_str, opts, &refiner, &results);
+
+ ASSERT_EQ(results.return_nodes.size(), 2);
+ EXPECT_EQ(results.return_nodes[0]->name(), "A");
+ EXPECT_EQ(results.return_nodes[1]->name(), "B");
+ EXPECT_EQ(results.return_nodes[1]->def().input(0), "A");
+
+ // Repeat the same import
+ results = ImportGraphDefResults();
+ ExpectOK(graph_def_str, opts, &refiner, &results);
+
+ ASSERT_EQ(results.return_nodes.size(), 2);
+ EXPECT_EQ(results.return_nodes[0]->name(), "A_1");
+ EXPECT_EQ(results.return_nodes[1]->name(), "B_1");
+ EXPECT_EQ(results.return_nodes[1]->def().input(0), "A_1:0");
+
+ // Repeat the same import again
+ results = ImportGraphDefResults();
+ ExpectOK(graph_def_str, opts, &refiner, &results);
+
+ ASSERT_EQ(results.return_nodes.size(), 2);
+ EXPECT_EQ(results.return_nodes[0]->name(), "A_2");
+ EXPECT_EQ(results.return_nodes[1]->name(), "B_2");
+ EXPECT_EQ(results.return_nodes[1]->def().input(0), "A_2:0");
+
+ // Import with existing de-duped node names
+ opts = ImportGraphDefOptions();
+ opts.uniquify_names = true;
+ opts.return_nodes.push_back("A_1");
+ opts.return_nodes.push_back("B_1");
+ results = ImportGraphDefResults();
+ ExpectOK(
+ "node { name: 'A_1' op: 'TestInput' }"
+ "node { name: 'B_1' op: 'TestOneInputTwoOutputs' input: ['A_1:0'] }",
+ opts, &refiner, &results);
+
+ ASSERT_EQ(results.return_nodes.size(), 2);
+ EXPECT_EQ(results.return_nodes[0]->name(), "A_1_1");
+ EXPECT_EQ(results.return_nodes[1]->name(), "B_1_1");
+ EXPECT_EQ(results.return_nodes[1]->def().input(0), "A_1_1:0");
+
+ // Create node with prefix and then import node with same name
+ ExpectOK("node { name: 'foo/abc' op: 'ABC' }");
+ opts = ImportGraphDefOptions();
+ opts.uniquify_names = true;
+ opts.return_nodes.push_back("foo");
+ results = ImportGraphDefResults();
+ ExpectOK("node { name: 'foo' op: 'TestInput' }", opts, &refiner, &results);
+
+ ASSERT_EQ(results.return_nodes.size(), 1);
+ EXPECT_EQ(results.return_nodes[0]->name(), "foo_1");
+
+ // Imported node names that collide with an existing prefix ('outer',
+ // 'outer/inner') are uniquified, but bare components of an existing name
+ // ('inner', 'abc') do not conflict.
+ ExpectOK("node { name: 'outer/inner/abc' op: 'ABC' }");
+
+ opts = ImportGraphDefOptions();
+ opts.uniquify_names = true;
+ opts.return_nodes.push_back("outer");
+ opts.return_nodes.push_back("inner");
+ opts.return_nodes.push_back("abc");
+ opts.return_nodes.push_back("outer/inner");
+ opts.return_nodes.push_back("outer/inner/abc");
+ results = ImportGraphDefResults();
+ ExpectOK(
+ "node { name: 'outer' op: 'TestInput' }"
+ "node { name: 'inner' op: 'TestInput' }"
+ "node { name: 'abc' op: 'TestInput' }"
+ "node { name: 'outer/inner' op: 'TestInput' }"
+ "node { name: 'outer/inner/abc' op: 'TestInput' }",
+ opts, &refiner, &results);
+
+ ASSERT_EQ(results.return_nodes.size(), 5);
+ EXPECT_EQ(results.return_nodes[0]->name(), "outer_1");
+ EXPECT_EQ(results.return_nodes[1]->name(), "inner");
+ EXPECT_EQ(results.return_nodes[2]->name(), "abc");
+ EXPECT_EQ(results.return_nodes[3]->name(), "outer/inner_1");
+ EXPECT_EQ(results.return_nodes[4]->name(), "outer/inner/abc_1");
+
+ // Import with input map containing conflicting names
+ opts = ImportGraphDefOptions();
+ opts.uniquify_names = true;
+ opts.input_map[TensorId("A", 0)] = TensorId("A", 0);
+ opts.input_map[TensorId("B", 0)] = TensorId("B", 0);
+ opts.return_nodes.push_back("A");
+ opts.return_nodes.push_back("B");
+ results = ImportGraphDefResults();
+ ExpectOK(graph_def_str, opts, &refiner, &results);
+
+ ASSERT_EQ(results.return_nodes.size(), 2);
+ EXPECT_EQ(results.return_nodes[0]->name(), "A_3");
+ EXPECT_EQ(results.return_nodes[1]->name(), "B_3");
+ EXPECT_EQ(results.return_nodes[1]->def().input(0), "A:0");
+
+ // Check that colocation groups are updated
+ opts = ImportGraphDefOptions();
+ opts.uniquify_names = true;
+ opts.return_nodes.push_back("A");
+ opts.return_nodes.push_back("B");
+ results = ImportGraphDefResults();
+ ExpectOK(
+ "node { name: 'A' op: 'TestInput' }"
+ "node { name: 'B' op: 'TestOneInputTwoOutputs' input: ['A:0'] "
+ " attr { key: '_class' value { list { s:'loc:@A' } } } }",
+ opts, &refiner, &results);
+
+ ASSERT_EQ(results.return_nodes.size(), 2);
+ EXPECT_EQ(results.return_nodes[0]->name(), "A_4");
+ EXPECT_EQ(results.return_nodes[1]->name(), "B_4");
+ EXPECT_EQ(results.return_nodes[1]->def().input(0), "A_4:0");
+ const AttrValue* class_attr =
+ results.return_nodes[1]->attrs().Find(kColocationAttrName);
+ ASSERT_TRUE(class_attr != nullptr);
+ ASSERT_EQ(class_attr->list().s_size(), 1);
+ EXPECT_EQ(class_attr->list().s(0), "loc:@A_4");
+}
+
TEST_F(GraphConstructorTest, ImportGraphDef_WithCycle) {
// Test graph produced in python using:
/*
@@ -2157,7 +2287,7 @@ TEST_F(GraphConstructorTest, ImportGraphDef_ErrorsDoNoChangeTheGraph) {
} while (0)
EXPECT_IMPORT_FAILURE(def, opts,
- "Node 'scope/A' already exists in the Graph");
+ "Node name 'scope/A' already exists in the Graph");
GraphDef bad_def;
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(
@@ -2240,7 +2370,7 @@ TEST_F(GraphConstructorTest, ImportGraphDef_ErrorsDoNoChangeTheGraph) {
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(
"node{name:'scope/A' op:'TestParams'}", &bad_def));
EXPECT_IMPORT_FAILURE(bad_def, opts,
- "Node 'scope/A' already exists in the Graph");
+ "Node name 'scope/A' already exists in the Graph");
parsed = protobuf::TextFormat::ParseFromString(
R"EOF(
diff --git a/tensorflow/core/grappler/costs/graph_properties.cc b/tensorflow/core/grappler/costs/graph_properties.cc
index e9cb2ee09d..67da0e3bca 100644
--- a/tensorflow/core/grappler/costs/graph_properties.cc
+++ b/tensorflow/core/grappler/costs/graph_properties.cc
@@ -394,6 +394,7 @@ Status GraphProperties::InferStatically() {
} while (!done);
}
+ std::unordered_map<const shape_inference::Dimension*, int> dim_ids;
for (const Node* const node : graph.nodes()) {
VLOG(1) << "<Node> " << node->name();
auto ctx = shape_refiner.GetContext(node);
@@ -412,7 +413,7 @@ Status GraphProperties::InferStatically() {
input_properties.resize(ctx->num_inputs());
for (int i = 0; i < ctx->num_inputs(); ++i) {
FillTensorPropertiesFromContext(ctx->input(i), node->input_type(i), ctx,
- &input_properties[i]);
+ &dim_ids, &input_properties[i]);
}
for (const auto& edge : node->in_edges()) {
if (!edge->src()->IsConstant()) {
@@ -439,7 +440,7 @@ Status GraphProperties::InferStatically() {
output_properties.resize(ctx->num_outputs());
for (int i = 0; i < ctx->num_outputs(); ++i) {
FillTensorPropertiesFromContext(ctx->output(i), node->output_type(i),
- ctx, &output_properties[i]);
+ ctx, &dim_ids, &output_properties[i]);
}
}
}
@@ -458,7 +459,7 @@ Status GraphProperties::InferDynamically(Cluster* cluster) {
return InferFromCostGraph(metadata.cost_graph());
}
-Status GraphProperties::AnnotateOutputShapes(GraphDef* output_graph_def) {
+Status GraphProperties::AnnotateOutputShapes(GraphDef* output_graph_def) const {
*output_graph_def = item_.graph;
for (int i = 0; i < output_graph_def->node_size(); i++) {
auto node = output_graph_def->mutable_node(i);
@@ -533,6 +534,7 @@ GraphProperties::GetOutputProperties(const string& node_name) const {
void GraphProperties::FillTensorPropertiesFromContext(
const ShapeHandle& shape, const DataType& type, InferenceContext* ctx,
+ std::unordered_map<const shape_inference::Dimension*, int>* dim_ids,
OpInfo::TensorProperties* properties) {
properties->set_dtype(type);
if (!ctx->RankKnown(shape)) {
@@ -541,6 +543,17 @@ void GraphProperties::FillTensorPropertiesFromContext(
for (int j = 0; j < ctx->Rank(shape); ++j) {
shape_inference::DimensionHandle dim = ctx->Dim(shape, j);
int64 d = ctx->Value(dim);
+ // Assign a negative id to each unknown dimension, starting at -2 (-1 is
+ // already reserved by TensorFlow to mean "unknown").
+ if (d < 0) {
+ auto it = dim_ids->find(dim.ptr_);
+ if (it != dim_ids->end()) {
+ d = it->second;
+ } else {
+ d = -(dim_ids->size() + 2);
+ dim_ids->emplace(dim.ptr_, d);
+ }
+ }
properties->mutable_shape()->add_dim()->set_size(d);
}
}
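
The dim_ids map threaded through FillTensorPropertiesFromContext() gives each distinct unknown Dimension object a stable negative id starting at -2, so two tensors that share the same symbolic dimension report the same value. A toy sketch of the numbering scheme, with a hypothetical Dim struct standing in for shape_inference::Dimension:

#include <iostream>
#include <unordered_map>

// Toy stand-in for shape_inference::Dimension; only the object's identity
// (its address) matters for the numbering scheme.
struct Dim { long long value; };  // value < 0 means "unknown"

long long SymbolicId(const Dim* dim,
                     std::unordered_map<const Dim*, long long>* dim_ids) {
  if (dim->value >= 0) return dim->value;  // known dimension: use it directly
  auto it = dim_ids->find(dim);
  if (it != dim_ids->end()) return it->second;
  // First time this unknown dimension is seen: assign -2, -3, ... (-1 stays
  // reserved for "completely unknown").
  long long d = -static_cast<long long>(dim_ids->size() + 2);
  dim_ids->emplace(dim, d);
  return d;
}

int main() {
  Dim batch{-1}, width{28};
  std::unordered_map<const Dim*, long long> dim_ids;
  // The same unknown Dim appearing in two shapes gets the same id (-2), so a
  // consumer can tell the dimensions are equal without knowing their value.
  std::cout << SymbolicId(&batch, &dim_ids) << " "    // -2
            << SymbolicId(&width, &dim_ids) << " "    // 28
            << SymbolicId(&batch, &dim_ids) << "\n";  // -2
}
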
diff --git a/tensorflow/core/grappler/costs/graph_properties.h b/tensorflow/core/grappler/costs/graph_properties.h
index 5649788be5..e2fe9f9689 100644
--- a/tensorflow/core/grappler/costs/graph_properties.h
+++ b/tensorflow/core/grappler/costs/graph_properties.h
@@ -40,8 +40,14 @@ class GraphProperties {
Status InferFromCostGraph(const CostGraphDef& cost_graph);
// Stores `item_.graph` with the inferred output shapes to `output_graph_def`.
- Status AnnotateOutputShapes(GraphDef* output_graph_def);
-
+ Status AnnotateOutputShapes(GraphDef* output_graph_def) const;
+
+ // Returns the properties of node inputs/outputs, including data types and
+ // shapes. Note that the dimensions in the shapes can be negative. The value
+ // -1 denotes that nothing is known about a dimension. Values strictly less
+ // than -1 encode symbolic dimensions: although the actual value of a
+ // symbolic dimension is unknown, all dimensions denoted by the same negative
+ // value are known to be equal.
bool HasInputProperties(const string& name) const;
bool HasOutputProperties(const string& name) const;
const std::vector<OpInfo::TensorProperties>& GetInputProperties(
@@ -51,7 +57,9 @@ class GraphProperties {
static void FillTensorPropertiesFromContext(
const shape_inference::ShapeHandle&, const DataType&,
- shape_inference::InferenceContext*, OpInfo::TensorProperties*);
+ shape_inference::InferenceContext*,
+ std::unordered_map<const shape_inference::Dimension*, int>* dim_ids,
+ OpInfo::TensorProperties*);
private:
// Inputs
diff --git a/tensorflow/core/grappler/costs/graph_properties_test.cc b/tensorflow/core/grappler/costs/graph_properties_test.cc
index 134db5ec5a..7fe7d5b511 100644
--- a/tensorflow/core/grappler/costs/graph_properties_test.cc
+++ b/tensorflow/core/grappler/costs/graph_properties_test.cc
@@ -54,7 +54,8 @@ class GraphPropertiesTest : public ::testing::Test {
} else {
strings::StrAppend(&s, "[");
for (int i = 0; i < p.shape().dim_size(); ++i) {
- strings::StrAppend(&s, i == 0 ? "" : ",", p.shape().dim(i).size());
+ strings::StrAppend(&s, i == 0 ? "" : ",",
+ std::max<int64>(p.shape().dim(i).size(), -1));
}
strings::StrAppend(&s, "]");
}
diff --git a/tensorflow/core/grappler/costs/op_level_cost_estimator.cc b/tensorflow/core/grappler/costs/op_level_cost_estimator.cc
index a2fa847df2..bd84331b67 100644
--- a/tensorflow/core/grappler/costs/op_level_cost_estimator.cc
+++ b/tensorflow/core/grappler/costs/op_level_cost_estimator.cc
@@ -98,7 +98,7 @@ TensorShapeProto MaybeGetMinimumShape(const TensorShapeProto& original_shape,
}
} else {
for (int i = 0; i < shape.dim_size(); i++) {
- if (shape.dim(i).size() == -1) {
+ if (shape.dim(i).size() < 0) {
*found_unknown_shapes = true;
VLOG(2) << "Use minimum dim size 1 because the shape is unknown.";
// The size of each dimension is at least 1, if unknown.
diff --git a/tensorflow/core/grappler/optimizers/arithmetic_optimizer.cc b/tensorflow/core/grappler/optimizers/arithmetic_optimizer.cc
index f1c31ebb25..2306e9f513 100644
--- a/tensorflow/core/grappler/optimizers/arithmetic_optimizer.cc
+++ b/tensorflow/core/grappler/optimizers/arithmetic_optimizer.cc
@@ -19,6 +19,7 @@ limitations under the License.
#include <limits>
#include <unordered_map>
#include <unordered_set>
+#include <vector>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
@@ -30,9 +31,11 @@ limitations under the License.
#include "tensorflow/core/grappler/optimizers/constant_folding.h"
#include "tensorflow/core/grappler/utils/frame.h"
#include "tensorflow/core/lib/core/errors.h"
+#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/tensor_coding.h"
#include "tensorflow/core/util/device_name_utils.h"
+#include "tensorflow/core/util/saved_tensor_slice_util.h"
namespace tensorflow {
namespace grappler {
@@ -77,14 +80,14 @@ Status SetTensorValue(DataType dtype, int value, Tensor* tensor) {
return Status::OK();
}
-static bool IsInvolution(const NodeDef& node) {
+bool IsInvolution(const NodeDef& node) {
const std::unordered_set<string> involution_ops = {"Conj", "Reciprocal",
"Neg", "LogicalNot"};
return involution_ops.count(node.op()) > 0;
}
-bool AreInversePermutations(gtl::ArraySlice<int32> a,
- gtl::ArraySlice<int32> b) {
+template <typename T>
+bool AreInversePermutations(const std::vector<T>& a, const std::vector<T>& b) {
if (a.size() != b.size()) {
return false;
}
@@ -96,46 +99,81 @@ bool AreInversePermutations(gtl::ArraySlice<int32> a,
return true;
}
-// Extract int32 values from a Const op to `int32_values`. Returns true if
-// succeeds.
-bool Int32ValuesFromNode(const NodeDef& node, std::vector<int>* int32_values) {
+// Extracts values from a Const op into `values`. Returns true on success.
+template <typename T>
+bool ValuesFromConstNode(const NodeDef& node, std::vector<T>* values) {
if (node.op() != "Const") {
return false;
}
- if (node.attr().at("dtype").type() != DT_INT32) {
+ if (node.attr().at("dtype").type() != DataTypeToEnum<T>::value) {
return false;
}
// TensorProto represents the content of the tensor in either <type>_val or
// tensor_content.
const TensorProto& tensor = node.attr().at("value").tensor();
- if (tensor.int_val_size() > 0 && tensor.has_tensor_shape()) {
+ typename checkpoint::SaveTypeTraits<T>::RepeatedField* tensor_values =
+ checkpoint::MutableTensorProtoData<T>(const_cast<TensorProto*>(&tensor));
+
+ if (!tensor_values->empty() && tensor.has_tensor_shape()) {
// When tensor_shape is set, theoretically the representation of the data
- // could be compressed. So, before copying int_val to the returned vector,
+ // could be compressed. So, before copying values to the returned vector,
// make sure no compression happens.
const TensorShapeProto& shape = tensor.tensor_shape();
- if (shape.dim_size() == 1 && shape.dim(0).size() == tensor.int_val_size()) {
- int32_values->insert(int32_values->end(), tensor.int_val().begin(),
- tensor.int_val().end());
+ if (shape.dim_size() == 1 && shape.dim(0).size() == tensor_values->size()) {
+ values->insert(values->end(), tensor_values->begin(),
+ tensor_values->end());
+ return true;
}
- return true;
}
const auto tensor_content_size = tensor.tensor_content().size();
if (tensor_content_size > 0) {
- CHECK_EQ(0, tensor_content_size % sizeof(int32))
+ CHECK_EQ(0, tensor_content_size % sizeof(T))
<< "tensor_content_size (" << tensor_content_size
- << ") is not a multiple of " << sizeof(int32);
- int32_values->resize(tensor_content_size / sizeof(int32));
+ << ") is not a multiple of " << sizeof(T);
+ values->resize(tensor_content_size / sizeof(T));
port::CopyToArray(tensor.tensor_content(),
- reinterpret_cast<char*>(int32_values->data()));
+ reinterpret_cast<char*>(values->data()));
return true;
}
return false;
}
+template <typename T>
+bool IsInnerMatrixTranspose(const std::vector<T>& perm) {
+ const T n = perm.size();
+ if (n < 2) {
+ return false;
+ }
+ for (T i = 0; i < n - 2; ++i) {
+ if (perm[i] != i) {
+ return false;
+ }
+ }
+ return perm[n - 1] == n - 2 && perm[n - 2] == n - 1;
+}
+
+bool IsInnerMatrixTransposeNode(const NodeDef& transpose_node,
+ const NodeMap* node_map) {
+ if (transpose_node.op() != "Transpose" &&
+ transpose_node.op() != "ConjugateTranspose") {
+ return false;
+ }
+ const NodeDef* perm_node = node_map->GetNode(transpose_node.input(1));
+ std::vector<int> perm32;
+ if (ValuesFromConstNode(*perm_node, &perm32)) {
+ return IsInnerMatrixTranspose(perm32);
+ }
+ std::vector<int64> perm64;
+ if (ValuesFromConstNode(*perm_node, &perm64)) {
+ return IsInnerMatrixTranspose(perm64);
+ }
+ return false;
+}
+
bool SimplyReordersData(const NodeDef& node) {
return node.op() == "Transpose";
}
@@ -181,6 +219,12 @@ void SetDataTypeToAttr(DataType dtype, const string& attr_name, NodeDef* node) {
(*node->mutable_attr())[attr_name].set_type(dtype);
}
+void FlipBooleanAttr(const string& attr_name, NodeDef* node) {
+ const bool old_value =
+ !node->attr().count(attr_name) ? false : node->attr().at(attr_name).b();
+ (*node->mutable_attr())[attr_name].set_b(!old_value);
+}
+
string SourceDataTypeAttrName(const NodeDef& node) {
if (node.op() == "Bitcast") {
return "T";
@@ -497,13 +541,22 @@ string ArithmeticOptimizer::TrySimplifyAndReplaceUses(
if (input->op() == node->op()) {
const NodeDef* node_perm = node_map->GetNode(node->input(1));
const NodeDef* input_perm = node_map->GetNode(input->input(1));
+ // Try 32-bit indices.
std::vector<int> node_perm_values;
std::vector<int> input_perm_values;
- if (Int32ValuesFromNode(*node_perm, &node_perm_values) &&
- Int32ValuesFromNode(*input_perm, &input_perm_values) &&
+ if (ValuesFromConstNode(*node_perm, &node_perm_values) &&
+ ValuesFromConstNode(*input_perm, &input_perm_values) &&
AreInversePermutations(node_perm_values, input_perm_values)) {
return input->input(0);
}
+ // Try 64-bit indices.
+ std::vector<int64> node_perm_values64;
+ std::vector<int64> input_perm_values64;
+ if (ValuesFromConstNode(*node_perm, &node_perm_values64) &&
+ ValuesFromConstNode(*input_perm, &input_perm_values64) &&
+ AreInversePermutations(node_perm_values64, input_perm_values64)) {
+ return input->input(0);
+ }
}
}
@@ -865,12 +918,60 @@ string ArithmeticOptimizer::TrySimplifyAndReplaceUses(
}
}
- // Fuse ops by absorbing Conj into Transpose or ConjugateTranspose.
+ // Fold Transpose into matrix multiplication.
+ if (node->op() == "MatMul" || node->op() == "SparseMatMul" ||
+ node->op() == "BatchMatMul") {
+ const NodeDef* a = node_map->GetNode(node->input(0));
+ const NodeDef* b = node_map->GetNode(node->input(1));
+ bool is_complex = false;
+ if (node->op() != "SparseMatMul") {
+ const DataType type = GetDataTypeFromAttr(*node, "T");
+ is_complex = (type == DT_COMPLEX64) || (type == DT_COMPLEX128);
+ }
+ const std::set<string> foldable_transpose_ops =
+ !is_complex ? std::set<string>{"ConjugateTranspose", "Transpose"}
+ : (node->op() == "BatchMatMul"
+ ? std::set<string>{"ConjugateTranspose"}
+ : std::set<string>{"Transpose"});
+ const bool a_is_foldable = foldable_transpose_ops.count(a->op()) > 0 &&
+ IsInnerMatrixTransposeNode(*a, node_map);
+ const bool b_is_foldable = foldable_transpose_ops.count(b->op()) > 0 &&
+ IsInnerMatrixTransposeNode(*b, node_map);
+ if (a_is_foldable || b_is_foldable) {
+ NodeDef* new_op = graph_def->add_node();
+ *new_op = *node;
+ new_op->set_name(node->name() + "_fused");
+ node_map->AddNode(new_op->name(), new_op);
+ if (a_is_foldable) {
+ const string attr_a =
+ node->op() == "BatchMatMul" ? "adj_x" : "transpose_a";
+ FlipBooleanAttr(attr_a, new_op);
+ new_op->set_input(0, a->input(0));
+ node_map->UpdateInput(new_op->name(), a->name(), a->input(0));
+ AddFrameControlDeps(node, {new_op}, a->input(0), {new_op}, graph_def,
+ node_map, frame_map);
+ }
+ if (b_is_foldable) {
+ const string attr_b =
+ node->op() == "BatchMatMul" ? "adj_y" : "transpose_b";
+ FlipBooleanAttr(attr_b, new_op);
+ new_op->set_input(1, b->input(0));
+ node_map->UpdateInput(new_op->name(), b->name(), b->input(0));
+ if (!a_is_foldable) {
+ AddFrameControlDeps(node, {new_op}, b->input(0), {new_op}, graph_def,
+ node_map, frame_map);
+ }
+ }
+ }
+ }
+
+ // Fold Conj into Transpose or ConjugateTranspose.
if (node->op() == "Conj" || node->op() == "Transpose" ||
node->op() == "ConjugateTranspose") {
const NodeDef* input = node_map->GetNode(node->input(0));
const NodeDef* transpose_op = node->op() == "Conj" ? input : node;
const NodeDef* conj_op = node->op() == "Conj" ? node : input;
+
if ((transpose_op->op() == "Transpose" ||
transpose_op->op() == "ConjugateTranspose") &&
conj_op->op() == "Conj") {
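
Two predicates gate the new fold: a pair of Transpose ops cancels when their permutations are mutual inverses (a[b[i]] == i for all i), and a single Transpose folds into MatMul/BatchMatMul only when it swaps exactly the last two axes, since that is precisely what the transpose_a/transpose_b (or adj_x/adj_y) attributes express. A self-contained sketch of both checks:

#include <cassert>
#include <cstddef>
#include <vector>

// Sketch of AreInversePermutations(): b undoes a iff a[b[i]] == i for all i.
template <typename T>
bool AreInversePermutations(const std::vector<T>& a, const std::vector<T>& b) {
  if (a.size() != b.size()) return false;
  for (std::size_t i = 0; i < a.size(); ++i) {
    if (a[b[i]] != static_cast<T>(i)) return false;
  }
  return true;
}

// Sketch of IsInnerMatrixTranspose(): identity on all leading axes, with only
// the last two swapped -- the only permutation (Batch)MatMul can absorb.
template <typename T>
bool IsInnerMatrixTranspose(const std::vector<T>& perm) {
  const T n = perm.size();
  if (n < 2) return false;
  for (T i = 0; i < n - 2; ++i) {
    if (perm[i] != i) return false;
  }
  return perm[n - 1] == n - 2 && perm[n - 2] == n - 1;
}

int main() {
  assert((AreInversePermutations<int>({2, 0, 1}, {1, 2, 0})));
  assert((IsInnerMatrixTranspose<int>({0, 1, 3, 2})));  // batched transpose
  assert(!(IsInnerMatrixTranspose<int>({2, 1, 0})));    // reverses all axes
}
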
diff --git a/tensorflow/core/grappler/optimizers/arithmetic_optimizer_test.cc b/tensorflow/core/grappler/optimizers/arithmetic_optimizer_test.cc
index c1535886d1..cef3ed9ce1 100644
--- a/tensorflow/core/grappler/optimizers/arithmetic_optimizer_test.cc
+++ b/tensorflow/core/grappler/optimizers/arithmetic_optimizer_test.cc
@@ -145,7 +145,6 @@ TEST_F(ArithmeticOptimizerTest, SimplifyReplaceTrivialSums) {
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
- // VLOG(2) << output.DebugString();
EXPECT_EQ(5, output.node_size());
const NodeDef& new_const = output.node(3);
EXPECT_EQ("add_const", new_const.name());
@@ -176,7 +175,6 @@ TEST_F(ArithmeticOptimizerTest, SimplifyHoistFactor) {
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
- LOG(INFO) << output.DebugString();
EXPECT_EQ(9, output.node_size());
const NodeDef& new_add = output.node(8);
EXPECT_EQ("add_hoist", new_add.name());
@@ -206,7 +204,6 @@ TEST_F(ArithmeticOptimizerTest, FuseConjAndTranspose) {
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
- LOG(INFO) << output.DebugString();
EXPECT_EQ(7, output.node_size());
EXPECT_EQ("trans_fused", output.node(6).name());
@@ -231,7 +228,6 @@ TEST_F(ArithmeticOptimizerTest, FuseConjAndConjugateTranspose) {
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
- LOG(INFO) << output.DebugString();
EXPECT_EQ(7, output.node_size());
EXPECT_EQ("conjugate_trans_fused", output.node(6).name());
@@ -255,7 +251,6 @@ TEST_F(ArithmeticOptimizerTest, FuseTransposeAndConj) {
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
- LOG(INFO) << output.DebugString();
EXPECT_EQ(7, output.node_size());
EXPECT_EQ("conj_fused", output.node(6).name());
@@ -264,6 +259,77 @@ TEST_F(ArithmeticOptimizerTest, FuseTransposeAndConj) {
EXPECT_EQ("perm", output.node(6).input(1));
}
+TEST_F(ArithmeticOptimizerTest, FoldTransposeIntoMatMul) {
+ for (const string matmul_type : {"MatMul", "SparseMatMul", "BatchMatMul"}) {
+ tensorflow::Scope s = tensorflow::Scope::NewRootScope();
+ Output a = ops::Const(s.WithOpName("a"), {1.0f, 2.0f, 3.0f, 4.0f}, {2, 2});
+ Output b = ops::Const(s.WithOpName("b"), {5.0f, 6.0f, 7.0f, 8.0f}, {2, 2});
+ Output perm = ops::Const(s.WithOpName("perm"), {1, 0}, {2});
+ Output trans_a = ops::Transpose(s.WithOpName("trans_a"), a, perm);
+ Output trans_b = ops::Transpose(s.WithOpName("trans_b"), b, perm);
+ if (matmul_type == "MatMul") {
+ Output matmul = ops::MatMul(s.WithOpName("matmul"), trans_a, trans_b);
+ } else if (matmul_type == "SparseMatMul") {
+ Output matmul =
+ ops::SparseMatMul(s.WithOpName("matmul"), trans_a, trans_b);
+ } else if (matmul_type == "BatchMatMul") {
+ Output matmul =
+ ops::BatchMatMul(s.WithOpName("matmul"), trans_a, trans_b);
+ }
+ GrapplerItem item;
+ TF_CHECK_OK(s.ToGraphDef(&item.graph));
+
+ ArithmeticOptimizer optimizer;
+ GraphDef output;
+ Status status = optimizer.Optimize(nullptr, item, &output);
+ TF_EXPECT_OK(status);
+
+ EXPECT_EQ(7, output.node_size());
+ EXPECT_EQ("matmul_fused", output.node(6).name());
+ EXPECT_EQ("a", output.node(6).input(0));
+ EXPECT_EQ("b", output.node(6).input(1));
+ if (matmul_type == "BatchMatMul") {
+ EXPECT_TRUE(output.node(6).attr().at("adj_x").b());
+ EXPECT_TRUE(output.node(6).attr().at("adj_y").b());
+ } else {
+ EXPECT_TRUE(output.node(6).attr().at("transpose_a").b());
+ EXPECT_TRUE(output.node(6).attr().at("transpose_b").b());
+ }
+ }
+}
+
+TEST_F(ArithmeticOptimizerTest, FoldConjugateTransposeIntoBatchMatMul) {
+ tensorflow::Scope s = tensorflow::Scope::NewRootScope();
+ Output re_a =
+ ops::Const(s.WithOpName("re_a"), {1.0f, 2.0f, 3.0f, 4.0f}, {2, 2});
+ Output im_a =
+ ops::Const(s.WithOpName("im_a"), {-1.0f, -2.0f, -3.0f, -4.0f}, {2, 2});
+ Output re_b =
+ ops::Const(s.WithOpName("re_b"), {5.0f, 6.0f, 7.0f, 8.0f}, {2, 2});
+ Output im_b =
+ ops::Const(s.WithOpName("im_b"), {-5.0f, -6.0f, -7.0f, -8.0f}, {2, 2});
+ Output a = ops::Complex(s.WithOpName("a"), re_a, im_a);
+ Output b = ops::Complex(s.WithOpName("b"), re_b, im_b);
+ Output perm = ops::Const(s.WithOpName("perm"), {1, 0}, {2});
+ Output trans_a = ops::ConjugateTranspose(s.WithOpName("trans_a"), a, perm);
+ Output trans_b = ops::ConjugateTranspose(s.WithOpName("trans_b"), b, perm);
+ Output matmul = ops::BatchMatMul(s.WithOpName("matmul"), trans_a, trans_b);
+ GrapplerItem item;
+ TF_CHECK_OK(s.ToGraphDef(&item.graph));
+
+ ArithmeticOptimizer optimizer;
+ GraphDef output;
+ Status status = optimizer.Optimize(nullptr, item, &output);
+ TF_EXPECT_OK(status);
+
+ EXPECT_EQ(11, output.node_size());
+ EXPECT_EQ("matmul_fused", output.node(10).name());
+ EXPECT_EQ("a", output.node(10).input(0));
+ EXPECT_EQ("b", output.node(10).input(1));
+ EXPECT_TRUE(output.node(10).attr().at("adj_x").b());
+ EXPECT_TRUE(output.node(10).attr().at("adj_y").b());
+}
+
TEST_F(ArithmeticOptimizerTest, IdentityReshape) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output inputs =
diff --git a/tensorflow/core/grappler/optimizers/constant_folding.cc b/tensorflow/core/grappler/optimizers/constant_folding.cc
index e8ffff07c6..cb02314183 100644
--- a/tensorflow/core/grappler/optimizers/constant_folding.cc
+++ b/tensorflow/core/grappler/optimizers/constant_folding.cc
@@ -914,8 +914,8 @@ Status ConstantFolding::RunOptimizationPass(Cluster* cluster,
// new names, and as a result users would not be able to fetch the node any
// more with the original node name.
for (const auto& fetch : item.fetch) {
- auto fetch_node = node_map_->GetNode(fetch);
- if (NumOutputs(*fetch_node) == 1) {
+ const NodeDef* fetch_node = node_map_->GetNode(fetch);
+ if (fetch_node && NumOutputs(*fetch_node) == 1) {
nodes_whitelist_.insert(fetch_node->name());
}
}
diff --git a/tensorflow/core/grappler/optimizers/layout_optimizer.cc b/tensorflow/core/grappler/optimizers/layout_optimizer.cc
index 1ca296da0a..e2e4bc3de8 100644
--- a/tensorflow/core/grappler/optimizers/layout_optimizer.cc
+++ b/tensorflow/core/grappler/optimizers/layout_optimizer.cc
@@ -20,7 +20,6 @@ limitations under the License.
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
-#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/devices.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
@@ -61,23 +60,12 @@ std::set<string> GetOpsFormatSupported() {
}
std::set<string> GetOpsFormatAgnostic() {
- std::set<string> ops_format_agnostic = {"Add",
- "AddN",
- "Concat",
- "ConcatV2",
- "Floor",
- "Identity",
- "Mul",
- "Neg",
- "RealDiv",
- "Relu",
- "Relu6",
- "ReluGrad",
- "Sigmoid",
- "Slice",
- "SquaredDifference",
- "Squeeze",
- "Sub"};
+ std::set<string> ops_format_agnostic = {
+ "Add", "AddN", "Concat", "ConcatV2",
+ "Floor", "Identity", "Mul", "Neg",
+ "Pad", "RealDiv", "Relu", "Relu6",
+ "ReluGrad", "Sigmoid", "Slice", "SquaredDifference",
+ "Squeeze", "Sub"};
return ops_format_agnostic;
}
@@ -279,10 +267,23 @@ class NodeProcessor : public GraphProcessor {
if (!success) {
LOG(ERROR) << "Failed to parse TensorProto.";
}
- int c = tensor.flat<int>()(3);
- tensor.flat<int>()(3) = tensor.flat<int>()(2);
- tensor.flat<int>()(2) = tensor.flat<int>()(1);
- tensor.flat<int>()(1) = c;
+ if (tensor.dims() == 1) {
+ int c = tensor.flat<int>()(3);
+ tensor.flat<int>()(3) = tensor.flat<int>()(2);
+ tensor.flat<int>()(2) = tensor.flat<int>()(1);
+ tensor.flat<int>()(1) = c;
+ } else if (tensor.dims() == 2) {
+ for (int i = 0; i < 2; i++) {
+ int c = tensor.matrix<int>()(3, i);
+ tensor.matrix<int>()(3, i) = tensor.matrix<int>()(2, i);
+ tensor.matrix<int>()(2, i) = tensor.matrix<int>()(1, i);
+ tensor.matrix<int>()(1, i) = c;
+ }
+ } else {
+ return Status(
+ error::INVALID_ARGUMENT,
+ strings::StrCat("Unsupported dimension size: ", tensor.dims()));
+ }
tensor.AsProtoTensorContent(
node->mutable_attr()->at({"value"}).mutable_tensor());
return Status::OK();
@@ -290,6 +291,8 @@ class NodeProcessor : public GraphProcessor {
Status UpdateAttrValueOfInput(int input_index) {
auto input_node = node_map_->GetNode(node_->input(input_index));
+ // Create a copy of the node so that we don't modify the original node,
+ // which might be used elsewhere.
NodeDef* added_node = graph_->add_node();
*added_node = *input_node;
string base_name = strings::StrCat(node_->name(), "-", input_node->name());
@@ -876,6 +879,38 @@ class ConcatProcessor : public AgnosticNodeProcessor {
}
};
+class PadProcessor : public AgnosticNodeProcessor {
+ public:
+ PadProcessor(GraphDef* graph, NodeDef* node, NodeMap* node_map,
+ bool is_in_frame)
+ : AgnosticNodeProcessor(graph, node, node_map, is_in_frame) {}
+
+ protected:
+ bool ShouldProcess() const override {
+ return IsDimsFour(*node_) && HasOutputs() && IsNodeAfterNCHWToNHWC() &&
+ PaddingSupported();
+ }
+ Status CustomizedProcessing() override { return UpdateAttrValueOfInput(1); }
+
+ private:
+ bool PaddingSupported() const {
+ auto pad_const = node_map_->GetNode(node_->input(1));
+ bool is_const = IsConstant(*pad_const);
+ bool is_4D = false;
+ if (HasAttribute(*pad_const, "value").ok()) {
+ Tensor tensor;
+ if (tensor.FromProto(pad_const->mutable_attr()->at({"value"}).tensor())) {
+ if (tensor.dims() == 2) {
+ if (tensor.dim_size(0) == 4 && tensor.dim_size(1) == 2) {
+ is_4D = true;
+ }
+ }
+ }
+ }
+ return is_const && is_4D;
+ }
+};
+
class ReluGradProcessor : public AgnosticNodeProcessor {
public:
ReluGradProcessor(GraphDef* graph, NodeDef* node, NodeMap* node_map,
@@ -1179,21 +1214,11 @@ class SumProcessor : public AgnosticNodeProcessor {
}
};
-struct TuningConfig {
- // If true, do not use the NHWC GEMM implementation. When filter size is
- // one or filter size is equal to input image size,
- // the NHWC implementation of Conv2D, Conv2DBackpropInput, and
- // Conv2DBackpropFilter will use a specialized GEMM implementation, which is
- // usually faster than the NCHW implementation. The downside is that this
- // might result in more non-cancellable layout conversion nodes (implemented
- // by the Transpose op).
- bool no_gemm;
-};
-
class DataLayoutOptimizer : GraphProcessor {
public:
explicit DataLayoutOptimizer(const string& default_device, GraphDef* graph,
- NodeMap* node_map, TuningConfig config)
+ NodeMap* node_map,
+ LayoutOptimizer::TuningConfig config)
: GraphProcessor(graph, node_map),
default_device_(default_device),
config_(config) {}
@@ -1303,6 +1328,9 @@ class DataLayoutOptimizer : GraphProcessor {
node->op().compare("ConcatV2") == 0) {
node_processor.reset(
new ConcatProcessor(graph_, node, node_map_, is_in_frame));
+ } else if (node->op().compare("Pad") == 0) {
+ node_processor.reset(
+ new PadProcessor(graph_, node, node_map_, is_in_frame));
} else if (node->op().compare("ReluGrad") == 0) {
node_processor.reset(
new ReluGradProcessor(graph_, node, node_map_, is_in_frame));
@@ -1375,7 +1403,7 @@ class DataLayoutOptimizer : GraphProcessor {
}
string default_device_;
- TuningConfig config_;
+ LayoutOptimizer::TuningConfig config_;
};
int GetNumTranspose(const GraphDef& graph) {
@@ -1389,6 +1417,22 @@ int GetNumTranspose(const GraphDef& graph) {
return number;
}
+Status LayoutOptimizer::Tune(const GrapplerItem& item,
+ const GraphProperties& graph_properties,
+ const string& default_device,
+ const TuningConfig& config, GraphDef* output) {
+ auto status = graph_properties.AnnotateOutputShapes(output);
+ if (!status.ok()) {
+ *output = item.graph;
+ return status;
+ }
+ NodeMap node_map(output);
+ DataLayoutOptimizer layout_optimizer(default_device, output, &node_map,
+ config);
+ status = layout_optimizer.Optimize();
+ return status;
+}
+
Status LayoutOptimizer::Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* output) {
if (num_gpus_ == 0) {
@@ -1406,11 +1450,6 @@ Status LayoutOptimizer::Optimize(Cluster* cluster, const GrapplerItem& item,
*output = item.graph;
return status;
}
- status = graph_properties.AnnotateOutputShapes(output);
- if (!status.ok()) {
- *output = item.graph;
- return status;
- }
TuningConfig config;
config.no_gemm = false;
@@ -1420,19 +1459,14 @@ Status LayoutOptimizer::Optimize(Cluster* cluster, const GrapplerItem& item,
default_device = cluster->GetDevices().begin()->first;
}
}
- std::unique_ptr<NodeMap> node_map(new NodeMap(output));
- std::unique_ptr<DataLayoutOptimizer> layout_optimizer(
- new DataLayoutOptimizer(default_device, output, node_map.get(), config));
- status = layout_optimizer->Optimize();
+
+ status = Tune(item, graph_properties, default_device, config, output);
// This is based on the empirical observation that if more than 30 Transpose
// nodes are introduced, not using the GEMM implementation results in better
// performance.
if (status.ok() && GetNumTranspose(*output) > 30) {
config.no_gemm = true;
- node_map.reset(new NodeMap(output));
- layout_optimizer.reset(new DataLayoutOptimizer(default_device, output,
- node_map.get(), config));
- status = layout_optimizer->Optimize();
+ status = Tune(item, graph_properties, default_device, config, output);
}
if (!status.ok()) {
diff --git a/tensorflow/core/grappler/optimizers/layout_optimizer.h b/tensorflow/core/grappler/optimizers/layout_optimizer.h
index 1bd6f9544b..621c286976 100644
--- a/tensorflow/core/grappler/optimizers/layout_optimizer.h
+++ b/tensorflow/core/grappler/optimizers/layout_optimizer.h
@@ -16,11 +16,11 @@ limitations under the License.
#ifndef TENSORFLOW_GRAPPLER_OPTIMIZERS_LAYOUT_OPTIMIZER_H_
#define TENSORFLOW_GRAPPLER_OPTIMIZERS_LAYOUT_OPTIMIZER_H_
+#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/optimizers/graph_optimizer.h"
namespace tensorflow {
namespace grappler {
-
// Convert the NHWC layout to NCHW for Conv-related ops on GPUs.
class LayoutOptimizer : public GraphOptimizer {
public:
@@ -32,6 +32,17 @@ class LayoutOptimizer : public GraphOptimizer {
// This is for testing only.
void set_num_gpus(int num_gpus) { num_gpus_ = num_gpus; };
+ struct TuningConfig {
+ // If true, do not use the NHWC GEMM implementation. When filter size is
+ // one or filter size is equal to input image size,
+ // the NHWC implementation of Conv2D, Conv2DBackpropInput, and
+ // Conv2DBackpropFilter will use a specialized GEMM implementation, which is
+ // usually faster than the NCHW implementation. The downside is that this
+ // might result in more non-cancellable layout conversion nodes (implemented
+ // by the Transpose op).
+ bool no_gemm;
+ };
+
Status Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* output) override;
@@ -40,6 +51,9 @@ class LayoutOptimizer : public GraphOptimizer {
private:
int num_gpus_ = 0;
+ Status Tune(const GrapplerItem& item, const GraphProperties& graph_properties,
+ const string& default_device, const TuningConfig& config,
+ GraphDef* output);
};
} // end namespace grappler
diff --git a/tensorflow/core/grappler/optimizers/layout_optimizer_test.cc b/tensorflow/core/grappler/optimizers/layout_optimizer_test.cc
index 7ebc9aaf1c..e9febd7e18 100644
--- a/tensorflow/core/grappler/optimizers/layout_optimizer_test.cc
+++ b/tensorflow/core/grappler/optimizers/layout_optimizer_test.cc
@@ -200,6 +200,34 @@ TEST_F(LayoutOptimizerTest, NotEqualSizeWithValidPadding) {
node_map.GetNode("LayoutOptimizerTransposeNHWCToNCHW-Conv2D-Input-0"));
}
+TEST_F(LayoutOptimizerTest, Pad) {
+ tensorflow::Scope s = tensorflow::Scope::NewRootScope();
+ auto conv = SimpleConv2D(&s, 3, 2, "VALID");
+ auto c = ops::Const(s.WithOpName("c"), {1, 2, 3, 4, 5, 6, 7, 8}, {4, 2});
+ auto p = ops::Pad(s.WithOpName("p"), conv, c);
+ auto o = ops::Identity(s.WithOpName("o"), p);
+ GrapplerItem item;
+ TF_CHECK_OK(s.ToGraphDef(&item.graph));
+ LayoutOptimizer optimizer;
+ optimizer.set_num_gpus(1);
+ GraphDef output;
+ Status status = optimizer.Optimize(nullptr, item, &output);
+ NodeMap node_map(&output);
+
+ auto pad = node_map.GetNode("p");
+ EXPECT_EQ(pad->input(0), "Conv2D");
+
+ auto pad_const = node_map.GetNode("LayoutOptimizer-p-c");
+ EXPECT_TRUE(pad_const);
+ EXPECT_TRUE(pad_const->attr().find("value") != pad_const->attr().end());
+ Tensor tensor;
+ EXPECT_TRUE(
+ tensor.FromProto(pad_const->mutable_attr()->at({"value"}).tensor()));
+ Tensor tensor_expected(DT_INT32, {4, 2});
+ test::FillValues<int>(&tensor_expected, {1, 2, 7, 8, 3, 4, 5, 6});
+ test::ExpectTensorEqual<int>(tensor_expected, tensor);
+}
+
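As a concrete note on what the test above verifies: the 4x2 paddings constant
holds the per-dimension (before, after) padding amounts in NHWC order, and the
optimizer permutes its rows into NCHW order. A minimal numpy sketch of that
permutation (illustrative only, not part of this change):

    import numpy as np

    paddings_nhwc = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])  # rows: N, H, W, C
    nhwc_to_nchw = [0, 3, 1, 2]  # N stays first, C moves ahead of H and W
    paddings_nchw = paddings_nhwc[nhwc_to_nchw]
    print(paddings_nchw.ravel())  # [1 2 7 8 3 4 5 6], matching tensor_expected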
} // namespace
} // namespace grappler
} // namespace tensorflow
diff --git a/tensorflow/core/grappler/optimizers/meta_optimizer.cc b/tensorflow/core/grappler/optimizers/meta_optimizer.cc
index 1174a390f3..a9875c06d8 100644
--- a/tensorflow/core/grappler/optimizers/meta_optimizer.cc
+++ b/tensorflow/core/grappler/optimizers/meta_optimizer.cc
@@ -110,27 +110,65 @@ Status MetaOptimizer::Optimize(Cluster* cluster, const GrapplerItem& item,
bool already_optimized = false;
for (const auto& optimizer : optimizers) {
if (!already_optimized) {
- TF_RETURN_IF_ERROR(optimizer->Optimize(cluster, item, optimized_graph));
- already_optimized = true;
+ auto status = optimizer->Optimize(cluster, item, optimized_graph);
+ string result;
+ if (!status.ok()) {
+ VLOG(1) << "Not able to apply optimizer " << optimizer->name()
+ << ". Return status: " << status.ToString();
+ result = status.ToString();
+ } else {
+ already_optimized = true;
+ result = strings::StrCat(
+ "OK. "
+ "Graph size before: ",
+ item.graph.node_size(),
+ ". Graph size after: ", optimized_graph->node_size());
+ }
+ result_.push_back(std::make_pair(optimizer->name(), result));
} else {
GrapplerItem optimized_item(item, std::move(*optimized_graph));
- TF_RETURN_IF_ERROR(
- optimizer->Optimize(cluster, optimized_item, optimized_graph));
+ auto status =
+ optimizer->Optimize(cluster, optimized_item, optimized_graph);
+ string result;
+ if (!status.ok()) {
+ VLOG(1) << "Not able to apply optimizer " << optimizer->name()
+ << ". Return status: " << status.ToString();
+ optimized_graph->Swap(&optimized_item.graph);
+ result = status.ToString();
+ } else {
+ result = strings::StrCat(
+ "OK. "
+ "Graph size before: ",
+ optimized_item.graph.node_size(),
+ ". Graph size after: ", optimized_graph->node_size());
+ }
+ result_.push_back(std::make_pair(optimizer->name(), result));
}
}
- TopologicalSort(optimized_graph);
- // Make sure that the optimizers preserved the graph version and library.
- DCHECK_GE(optimized_graph->library().function_size(),
- item.graph.library().function_size());
- DCHECK_GE(optimized_graph->library().gradient_size(),
- item.graph.library().gradient_size());
- DCHECK_EQ(optimized_graph->versions().producer(),
- item.graph.versions().producer());
+ if (already_optimized) {
+ TopologicalSort(optimized_graph);
+ // Make sure that the optimizers preserved the graph version and library.
+ DCHECK_GE(optimized_graph->library().function_size(),
+ item.graph.library().function_size());
+ DCHECK_GE(optimized_graph->library().gradient_size(),
+ item.graph.library().gradient_size());
+ DCHECK_EQ(optimized_graph->versions().producer(),
+ item.graph.versions().producer());
+ } else {
+ *optimized_graph = item.graph;
+ }
return Status::OK();
}
+void MetaOptimizer::PrintResult() {
+ for (const auto& result : result_) {
+ LOG(INFO) << "Return status of optimizer " << result.first << ": "
+ << result.second;
+ }
+}
+
void MetaOptimizer::Feedback(Cluster* cluster, const GrapplerItem& item,
const GraphDef& pruned_graph, double result) {
// Nothing to do for MetaOptimizer.
diff --git a/tensorflow/core/grappler/optimizers/meta_optimizer.h b/tensorflow/core/grappler/optimizers/meta_optimizer.h
index b00886b964..382cfe51d4 100644
--- a/tensorflow/core/grappler/optimizers/meta_optimizer.h
+++ b/tensorflow/core/grappler/optimizers/meta_optimizer.h
@@ -37,6 +37,8 @@ class MetaOptimizer : public GraphOptimizer {
Status Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) override;
+ void PrintResult();
+
void Feedback(Cluster* cluster, const GrapplerItem& item,
const GraphDef& optimized_graph, double result) override;
@@ -44,6 +46,7 @@ class MetaOptimizer : public GraphOptimizer {
std::unique_ptr<GraphOptimizer> NewOptimizer(const string& optimizer);
DeviceBase* const cpu_device_; // may be NULL
RewriterConfig cfg_;
+ std::vector<std::pair<string, string>> result_;
};
bool MetaOptimizerEnabled(const RewriterConfig& cfg);
diff --git a/tensorflow/core/grappler/utils.cc b/tensorflow/core/grappler/utils.cc
index df6c0b9b1b..54be02b5f8 100644
--- a/tensorflow/core/grappler/utils.cc
+++ b/tensorflow/core/grappler/utils.cc
@@ -32,7 +32,9 @@ NodeMap::NodeMap(GraphDef* graph) : graph_(graph) {
auto node = graph_->mutable_node(i);
auto rslt = nodes_.insert(std::make_pair(node->name(), node));
// Check that the graph doesn't contain multiple nodes with the same name.
- CHECK(rslt.second);
+ if (!rslt.second) {
+ LOG(WARNING) << "Duplicated node in the graph: " << node->name();
+ }
for (const auto& input : node->input()) {
outputs_[NodeName(input)].insert(nodes_[node->name()]);
}
@@ -43,6 +45,7 @@ NodeDef* NodeMap::GetNode(const string& name) const {
string node_name = NodeName(name);
auto it = nodes_.find(node_name);
if (it == nodes_.end()) {
+ LOG(WARNING) << "Node " << node_name << " is not in the graph.";
return nullptr;
}
return it->second;
diff --git a/tensorflow/core/kernels/BUILD b/tensorflow/core/kernels/BUILD
index f9020ef08e..bcc026f476 100644
--- a/tensorflow/core/kernels/BUILD
+++ b/tensorflow/core/kernels/BUILD
@@ -2277,6 +2277,7 @@ cc_library(
":cholesky_grad",
":cholesky_op",
":determinant_op",
+ ":matrix_exponential_op",
":matrix_inverse_op",
":matrix_solve_ls_op",
":matrix_solve_op",
@@ -2344,6 +2345,12 @@ tf_kernel_library(
)
tf_kernel_library(
+ name = "matrix_exponential_op",
+ prefix = "matrix_exponential_op",
+ deps = LINALG_DEPS,
+)
+
+tf_kernel_library(
name = "self_adjoint_eig_op",
prefix = "self_adjoint_eig_op",
deps = LINALG_DEPS,
diff --git a/tensorflow/core/kernels/dataset.h b/tensorflow/core/kernels/dataset.h
index 44f7c2aca3..4a42ac80c3 100644
--- a/tensorflow/core/kernels/dataset.h
+++ b/tensorflow/core/kernels/dataset.h
@@ -242,29 +242,18 @@ class GraphDefBuilderWrapper {
// TODO(mrry): We will probably need to support more of
// OpKernelContext here. For example, should allocation be handled by
// the IteratorContext?
-// TODO(mrry): We will need to fabricate step IDs for calls to ops
-// that are not nested within a particular step.
// TODO(mrry): We're making some daring assumptions about the lifetime
-// of the FunctionLibraryRuntime and runner passed in here. Once
-// created, a FunctionLibraryRuntime should stay alive for the
-// remainder of a session, so we copy the pointer. A runner will be
-// deleted when the original step ends, but all existing runners only
-// close over session-lifetime (or longer-lived) state, so we can make
-// a copy of the function. There's nothing in the definition of either
-// class to guarantee that what we are doing is safe. We should
-// formalize the properties here.
+// of the runner passed in here. A runner will be deleted when the original
+// step ends, but all existing runners only close over session-lifetime (or
+// longer-lived) state, so we can make a copy of the function. There's nothing
+// in the definition of the API from which we took the runner to guarantee that
+// what we are doing is safe. We should formalize the properties here.
class IteratorContext {
public:
struct Params {
// Interface to operating system functionality.
Env* env;
- // The step being executed.
- int64 step_id = 0;
-
- // Shared resources accessible by this iterator invocation.
- ResourceMgr* resource_manager = nullptr;
-
// Function call support.
std::function<void(std::function<void()>)> runner = nullptr;
};
@@ -273,14 +262,10 @@ class IteratorContext {
Env* env() const { return params_.env; }
- int64 step_id() const { return params_.step_id; }
-
std::function<void(std::function<void()>)>* runner() {
return &params_.runner;
}
- ResourceMgr* resource_manager() const { return params_.resource_manager; }
-
private:
Params params_;
};
diff --git a/tensorflow/core/kernels/generate_vocab_remapping_op.cc b/tensorflow/core/kernels/generate_vocab_remapping_op.cc
index 247c1f2457..2b97677e38 100644
--- a/tensorflow/core/kernels/generate_vocab_remapping_op.cc
+++ b/tensorflow/core/kernels/generate_vocab_remapping_op.cc
@@ -41,6 +41,8 @@ class GenerateVocabRemappingOp : public OpKernel {
OP_REQUIRES_OK(context,
context->GetAttr("new_vocab_offset", &new_vocab_offset_));
OP_REQUIRES_OK(context, context->GetAttr("num_new_vocab", &num_new_vocab_));
+ OP_REQUIRES_OK(context,
+ context->GetAttr("old_vocab_size", &old_vocab_size_));
}
void Compute(OpKernelContext* context) override {
@@ -92,16 +94,14 @@ class GenerateVocabRemappingOp : public OpKernel {
lookup::HashTable<string, int64>* old_vocab_table =
new lookup::HashTable<string, int64>(context, this);
core::ScopedUnref unref_old(old_vocab_table);
- // Note: we pass -1 (unknown) for vocab_size, which is supposed to be the
- // total elements in file. This is different from num_new_vocab_, which
- // accounts for partitioning.
- OP_REQUIRES_OK(context, lookup::InitializeTableFromTextFile(
- old_vocab_filename,
- -1, // vocab_size
- kUnusedLookupDelim,
- -2, // key_index, use the whole line/token.
- -1, // value_index, use the line number.
- context->env(), old_vocab_table));
+    // Note: If old_vocab_size_ is -1 (unknown), we retrieve all elements in
+    // the file (see TextFileLineIterator).
+ OP_REQUIRES_OK(context,
+ lookup::InitializeTableFromTextFile(
+ old_vocab_filename, old_vocab_size_, kUnusedLookupDelim,
+ -2, // key_index, use the whole line/token.
+ -1, // value_index, use the line number.
+ context->env(), old_vocab_table));
// Fill out new_ids = [new_vocab_offset, new_vocab_offset + 1, ...,
// new_vocab_offset + num_new_vocab_]
@@ -165,6 +165,7 @@ class GenerateVocabRemappingOp : public OpKernel {
private:
int new_vocab_offset_;
int num_new_vocab_;
+ int old_vocab_size_;
};
REGISTER_KERNEL_BUILDER(Name("GenerateVocabRemapping").Device(DEVICE_CPU),
diff --git a/tensorflow/core/kernels/iterator_ops.cc b/tensorflow/core/kernels/iterator_ops.cc
index ad9355d3de..ae77ae6433 100644
--- a/tensorflow/core/kernels/iterator_ops.cc
+++ b/tensorflow/core/kernels/iterator_ops.cc
@@ -427,8 +427,6 @@ class ToSingleElementOp : public OpKernel {
IteratorContext::Params params;
params.env = ctx->env();
- params.step_id = ctx->step_id();
- params.resource_manager = ctx->resource_manager();
params.runner = *(ctx->runner());
IteratorContext iter_ctx(std::move(params));
@@ -664,8 +662,6 @@ class IteratorGetNextOp : public AsyncOpKernel {
IteratorContext::Params params;
params.env = ctx->env();
- params.step_id = ctx->step_id();
- params.resource_manager = ctx->resource_manager();
params.runner = *(ctx->runner());
IteratorContext iter_ctx(std::move(params));
@@ -787,7 +783,7 @@ class SerializeIteratorOp : public OpKernel {
IteratorResource* iterator_resource;
OP_REQUIRES_OK(
ctx, LookupResource(ctx, HandleFromInput(ctx, 0), &iterator_resource));
- iterator_resource->Unref();
+ core::ScopedUnref unref_iterator(iterator_resource);
Tensor* variant_t;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &variant_t));
IteratorStateVariant v;
diff --git a/tensorflow/core/kernels/matrix_exponential_op.cc b/tensorflow/core/kernels/matrix_exponential_op.cc
new file mode 100644
index 0000000000..4cc3f32f7e
--- /dev/null
+++ b/tensorflow/core/kernels/matrix_exponential_op.cc
@@ -0,0 +1,59 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+// See docs in ../ops/linalg_ops.cc.
+
+#include "third_party/eigen3/Eigen/Core"
+#include "third_party/eigen3/unsupported/Eigen/MatrixFunctions"
+#include "tensorflow/core/framework/kernel_def_builder.h"
+#include "tensorflow/core/framework/op_kernel.h"
+#include "tensorflow/core/framework/tensor_shape.h"
+#include "tensorflow/core/kernels/linalg_ops_common.h"
+#include "tensorflow/core/lib/core/errors.h"
+#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/macros.h"
+#include "tensorflow/core/platform/types.h"
+
+
+namespace tensorflow {
+
+template <class Scalar>
+class MatrixExponentialOp : public LinearAlgebraOp<Scalar> {
+ public:
+ INHERIT_LINALG_TYPEDEFS(Scalar);
+
+ explicit MatrixExponentialOp(OpKernelConstruction* context) : Base(context) {}
+
+ void ComputeMatrix(OpKernelContext* context, const ConstMatrixMaps& inputs,
+ MatrixMaps* outputs) final {
+ const ConstMatrixMap& input = inputs[0];
+ if (input.rows() == 0) return;
+    using Matrix =
+        Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;
+ Matrix tmp = input;
+ outputs->at(0) = tmp.exp();
+ }
+
+ private:
+ TF_DISALLOW_COPY_AND_ASSIGN(MatrixExponentialOp);
+};
+
+REGISTER_LINALG_OP("MatrixExponential", (MatrixExponentialOp<float>), float);
+REGISTER_LINALG_OP("MatrixExponential", (MatrixExponentialOp<double>), double);
+REGISTER_LINALG_OP("MatrixExponential",
+ (MatrixExponentialOp<complex64>), complex64);
+REGISTER_LINALG_OP("MatrixExponential",
+ (MatrixExponentialOp<complex128>), complex128);
+
+} // namespace tensorflow
diff --git a/tensorflow/core/kernels/prefetch_dataset_op.cc b/tensorflow/core/kernels/prefetch_dataset_op.cc
index a7aac508eb..80592aa353 100644
--- a/tensorflow/core/kernels/prefetch_dataset_op.cc
+++ b/tensorflow/core/kernels/prefetch_dataset_op.cc
@@ -37,30 +37,14 @@ class PrefetchDatasetOp : public UnaryDatasetOpKernel {
OP_REQUIRES_OK(
ctx, ParseScalarArgument<int64>(ctx, "buffer_size", &buffer_size));
- // TODO(mrry): It seems unnatural to capture the params from *this
- // kernel's* OpKernelContext, although the captured values should
- // be the same for any kernel in the same session. Consider adding
- // an IteratorContext* argument to Dataset::MakeIterator(), and
- // threading the context information through that
- // way. Alternatively, provide a session-scoped context that will
- // provide this information to all users in the same session (and
- // that will have the appropriate lifetime).
- IteratorContext::Params params;
- params.env = ctx->env();
- params.resource_manager = ctx->resource_manager();
- params.runner = *(ctx->runner());
-
- *output = new Dataset(input, buffer_size, std::move(params));
+ *output = new Dataset(input, buffer_size);
}
private:
class Dataset : public DatasetBase {
public:
- Dataset(const DatasetBase* input, int64 buffer_size,
- IteratorContext::Params ctx_params)
- : input_(input),
- buffer_size_(buffer_size),
- ctx_params_(std::move(ctx_params)) {
+ Dataset(const DatasetBase* input, int64 buffer_size)
+ : input_(input), buffer_size_(buffer_size) {
input_->Ref();
}
@@ -218,7 +202,6 @@ class PrefetchDatasetOp : public UnaryDatasetOpKernel {
const DatasetBase* const input_;
const int64 buffer_size_;
- const IteratorContext::Params ctx_params_;
};
};
diff --git a/tensorflow/core/kernels/tensor_array.h b/tensorflow/core/kernels/tensor_array.h
index 6882a8a0e5..2a41d4c419 100644
--- a/tensorflow/core/kernels/tensor_array.h
+++ b/tensorflow/core/kernels/tensor_array.h
@@ -537,30 +537,33 @@ Status TensorArray::LockedRead(OpKernelContext* ctx, const int32 index,
" but array size is: ", tensors_.size());
}
size_t index_t = static_cast<size_t>(index);
- if (is_grad_ && (index_t >= tensors_.size() || !tensors_[index].written)) {
+ if ((is_grad_ && (index_t >= tensors_.size() || !tensors_[index].written)) ||
+ (!is_grad_ && (index_t < tensors_.size() && !tensors_[index].written))) {
    // Special case returning zeros if this is a gradient read that happens
    // after a stop_gradients call with dynamic forward TensorArrays, or a
    // forward read of an index that has not yet been written.
    // There is sometimes a race condition where the gradient is not
    // written due to stop_gradients, but is later read.
TensorShape element_shape;
- if (index_t < tensors_.size() && tensors_[index].shape.dims() > 0) {
+ if (is_grad_ && index_t < tensors_.size() &&
+ tensors_[index].shape.dims() > 0) {
+ // A gradient TensorArray has more specific gradient information
+ // available for each entry. A forward TensorArray must rely on
+ // the global element_shape_ to fill in zeros on read.
element_shape = tensors_[index].shape;
} else if (!element_shape_.IsFullyDefined()) {
return errors::InvalidArgument(
"TensorArray ", handle_.vec<string>()(1),
- ": Could not read from gradient TensorArray index ", index,
+ ": Could not read from TensorArray index ", index,
". Furthermore, the element shape is not fully defined: ",
element_shape_.DebugString(),
- ". "
- "It is likely you are working with a resizeable TensorArray and "
- "stop_gradients "
- "is not allowing the gradients to be written. If you set the full "
- "element_shape "
- "property on the forward TensorArray, the proper all-zeros tensor "
- "will be "
- "returned instead of incurring this error.");
+ ". It is possible you are working with a resizeable TensorArray and "
+ "stop_gradients is not allowing the gradients to be written. If you "
+ "set the full "
+ "element_shape property on the forward TensorArray, the proper "
+ "all-zeros tensor "
+ "will be returned instead of incurring this error.");
} else {
- DCHECK(element_shape_.AsTensorShape(&element_shape));
+ element_shape_.AsTensorShape(&element_shape); // Always succeeds.
}
if (index_t >= tensors_.size()) {
// Fill in tensors_ up to index to have known shape.
@@ -578,13 +581,6 @@ Status TensorArray::LockedRead(OpKernelContext* ctx, const int32 index,
TensorAndState& t = tensors_[index];
- if (!t.written) {
- return errors::InvalidArgument("TensorArray ", handle_.vec<string>()(1),
- ": Could not read from TensorArray index ",
- index,
- " because it has not yet been written to.");
- }
-
if (t.cleared) {
return errors::InvalidArgument("TensorArray ", handle_.vec<string>()(1),
": Could not read index ", index,
diff --git a/tensorflow/core/ops/checkpoint_ops.cc b/tensorflow/core/ops/checkpoint_ops.cc
index b49d7b4d40..08b00c8255 100644
--- a/tensorflow/core/ops/checkpoint_ops.cc
+++ b/tensorflow/core/ops/checkpoint_ops.cc
@@ -22,6 +22,7 @@ REGISTER_OP("GenerateVocabRemapping")
.Input("old_vocab_file: string")
.Attr("new_vocab_offset: int >= 0")
.Attr("num_new_vocab: int >= 0")
+ .Attr("old_vocab_size: int >= -1 = -1")
.Output("remapping: int64")
.Output("num_present: int32")
.SetShapeFn([](shape_inference::InferenceContext* c) {
@@ -43,7 +44,11 @@ Given a path to new and old vocabulary files, returns a remapping Tensor of
length `num_new_vocab`, where `remapping[i]` contains the row number in the old
vocabulary that corresponds to row `i` in the new vocabulary (starting at line
`new_vocab_offset` and up to `num_new_vocab` entities), or `-1` if entry `i`
-in the new vocabulary is not in the old vocabulary. `num_vocab_offset` enables
+in the new vocabulary is not in the old vocabulary. The old vocabulary is
+constrained to the first `old_vocab_size` entries if `old_vocab_size` is not the
+default value of -1.
+
+`new_vocab_offset` enables
use in the partitioned variable case, and should generally be set through
examining partitioning info. The format of the files should be a text file,
with each line containing a single entity within the vocabulary.
@@ -69,6 +74,8 @@ new_vocab_file: Path to the new vocab file.
old_vocab_file: Path to the old vocab file.
new_vocab_offset: How many entries into the new vocab file to start reading.
num_new_vocab: Number of entries in the new vocab file to remap.
+old_vocab_size: Number of entries in the old vocab file to consider. If -1,
+ use the entire old vocabulary.
remapping: A Tensor of length num_new_vocab where the element at index i
is equal to the old ID that maps to the new ID i. This element is -1 for any
new ID that is not found in the old vocabulary.
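To make the remapping semantics concrete, a small pure-Python model of the op
(an illustrative sketch; the real kernel streams the vocab files through a
lookup table rather than materializing lists):

    def generate_vocab_remapping(new_vocab, old_vocab, new_vocab_offset,
                                 num_new_vocab, old_vocab_size=-1):
      if old_vocab_size != -1:
        old_vocab = old_vocab[:old_vocab_size]  # constrain the old vocabulary
      old_ids = {entity: i for i, entity in enumerate(old_vocab)}
      window = new_vocab[new_vocab_offset:new_vocab_offset + num_new_vocab]
      remapping = [old_ids.get(entity, -1) for entity in window]
      num_present = sum(1 for r in remapping if r != -1)
      return remapping, num_present

    # The example from the doc above: remapping [0, -1, 2], num_present 2.
    assert generate_vocab_remapping(
        ['f0', 'f1', 'f2', 'f3'], ['f1', 'f0', 'f3'], 1, 3) == ([0, -1, 2], 2)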
diff --git a/tensorflow/core/ops/compat/ops_history.v1.pbtxt b/tensorflow/core/ops/compat/ops_history.v1.pbtxt
index 382812be18..973691a353 100644
--- a/tensorflow/core/ops/compat/ops_history.v1.pbtxt
+++ b/tensorflow/core/ops/compat/ops_history.v1.pbtxt
@@ -13344,6 +13344,44 @@ op {
}
}
op {
+ name: "GenerateVocabRemapping"
+ input_arg {
+ name: "new_vocab_file"
+ type: DT_STRING
+ }
+ input_arg {
+ name: "old_vocab_file"
+ type: DT_STRING
+ }
+ output_arg {
+ name: "remapping"
+ type: DT_INT64
+ }
+ output_arg {
+ name: "num_present"
+ type: DT_INT32
+ }
+ attr {
+ name: "new_vocab_offset"
+ type: "int"
+ has_minimum: true
+ }
+ attr {
+ name: "num_new_vocab"
+ type: "int"
+ has_minimum: true
+ }
+ attr {
+ name: "old_vocab_size"
+ type: "int"
+ default_value {
+ i: -1
+ }
+ has_minimum: true
+ minimum: -1
+ }
+}
+op {
name: "GetSessionHandle"
input_arg {
name: "value"
diff --git a/tensorflow/core/ops/linalg_ops.cc b/tensorflow/core/ops/linalg_ops.cc
index 4851619f83..53e2360d23 100644
--- a/tensorflow/core/ops/linalg_ops.cc
+++ b/tensorflow/core/ops/linalg_ops.cc
@@ -282,6 +282,33 @@ Equivalent to np.linalg.inv
@end_compatibility
)doc");
+REGISTER_OP("MatrixExponential")
+ .Input("input: T")
+ .Output("output: T")
+ .Attr("T: {double, float, complex64, complex128}")
+ .SetShapeFn(BatchUnchangedSquareShapeFn)
+ .Doc(R"doc(
+Computes the matrix exponential of one or more square matrices:
+
+exp(A) = \sum_{n=0}^\infty A^n/n!
+
+The exponential is computed using a combination of the scaling and squaring
+method and the Pade approximation. Details can be found in:
+Nicholas J. Higham, "The scaling and squaring method for the matrix exponential
+revisited," SIAM J. Matrix Anal. Applic., 26:1179-1193, 2005.
+
+The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
+form square matrices. The output is a tensor of the same shape as the input
+containing the exponential for all input submatrices `[..., :, :]`.
+
+input: Shape is `[..., M, M]`.
+output: Shape is `[..., M, M]`.
+
+@compatibility(scipy)
+Equivalent to scipy.linalg.expm
+@end_compatibility
+)doc");
+
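As a rough illustration of the scaling-and-squaring idea this doc references
(the kernel itself delegates to Eigen's MatrixFunctions, and the production
method uses a Pade approximant rather than the raw Taylor series used here):

    import numpy as np

    def expm_scaling_squaring(a, terms=16):
      # Scale a down so the series converges quickly.
      norm = np.linalg.norm(a, ord=np.inf)
      s = max(0, int(np.ceil(np.log2(norm))) + 1) if norm > 0 else 0
      a_scaled = a / (2.0 ** s)
      # Truncated Taylor series for exp(a_scaled).
      result = np.eye(a.shape[0])
      term = np.eye(a.shape[0])
      for n in range(1, terms):
        term = term.dot(a_scaled) / n
        result = result + term
      # Undo the scaling: exp(a) = exp(a / 2^s)^(2^s).
      for _ in range(s):
        result = result.dot(result)
      return result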
REGISTER_OP("Cholesky")
.Input("input: T")
.Output("output: T")
diff --git a/tensorflow/core/ops/ops.pbtxt b/tensorflow/core/ops/ops.pbtxt
index 58d0fb3e73..f1c6b84516 100644
--- a/tensorflow/core/ops/ops.pbtxt
+++ b/tensorflow/core/ops/ops.pbtxt
@@ -10082,8 +10082,18 @@ op {
description: "Number of entries in the new vocab file to remap."
has_minimum: true
}
+ attr {
+ name: "old_vocab_size"
+ type: "int"
+ default_value {
+ i: -1
+ }
+ description: "Number of entries in the old vocab file to consider. If -1,\nuse the entire old vocabulary."
+ has_minimum: true
+ minimum: -1
+ }
summary: "Given a path to new and old vocabulary files, returns a remapping Tensor of"
- description: "length `num_new_vocab`, where `remapping[i]` contains the row number in the old\nvocabulary that corresponds to row `i` in the new vocabulary (starting at line\n`new_vocab_offset` and up to `num_new_vocab` entities), or `-1` if entry `i`\nin the new vocabulary is not in the old vocabulary. `num_vocab_offset` enables\nuse in the partitioned variable case, and should generally be set through\nexamining partitioning info. The format of the files should be a text file,\nwith each line containing a single entity within the vocabulary.\n\nFor example, with `new_vocab_file` a text file containing each of the following\nelements on a single line: `[f0, f1, f2, f3]`, old_vocab_file = [f1, f0, f3],\n`num_new_vocab = 3, new_vocab_offset = 1`, the returned remapping would be\n`[0, -1, 2]`.\n\nThe op also returns a count of how many entries in the new vocabulary\nwere present in the old vocabulary, which is used to calculate the number of\nvalues to initialize in a weight matrix remapping\n\nThis functionality can be used to remap both row vocabularies (typically,\nfeatures) and column vocabularies (typically, classes) from TensorFlow\ncheckpoints. Note that the partitioning logic relies on contiguous vocabularies\ncorresponding to div-partitioned variables. Moreover, the underlying remapping\nuses an IndexTable (as opposed to an inexact CuckooTable), so client code should\nuse the corresponding index_table_from_file() as the FeatureColumn framework\ndoes (as opposed to tf.feature_to_id(), which uses a CuckooTable)."
+ description: "length `num_new_vocab`, where `remapping[i]` contains the row number in the old\nvocabulary that corresponds to row `i` in the new vocabulary (starting at line\n`new_vocab_offset` and up to `num_new_vocab` entities), or `-1` if entry `i`\nin the new vocabulary is not in the old vocabulary. The old vocabulary is\nconstrained to the first `old_vocab_size` entries if `old_vocab_size` is not the\ndefault value of -1.\n\n`num_vocab_offset` enables\nuse in the partitioned variable case, and should generally be set through\nexamining partitioning info. The format of the files should be a text file,\nwith each line containing a single entity within the vocabulary.\n\nFor example, with `new_vocab_file` a text file containing each of the following\nelements on a single line: `[f0, f1, f2, f3]`, old_vocab_file = [f1, f0, f3],\n`num_new_vocab = 3, new_vocab_offset = 1`, the returned remapping would be\n`[0, -1, 2]`.\n\nThe op also returns a count of how many entries in the new vocabulary\nwere present in the old vocabulary, which is used to calculate the number of\nvalues to initialize in a weight matrix remapping\n\nThis functionality can be used to remap both row vocabularies (typically,\nfeatures) and column vocabularies (typically, classes) from TensorFlow\ncheckpoints. Note that the partitioning logic relies on contiguous vocabularies\ncorresponding to div-partitioned variables. Moreover, the underlying remapping\nuses an IndexTable (as opposed to an inexact CuckooTable), so client code should\nuse the corresponding index_table_from_file() as the FeatureColumn framework\ndoes (as opposed to tf.feature_to_id(), which uses a CuckooTable)."
}
op {
name: "GetSessionHandle"
diff --git a/tensorflow/go/op/wrappers.go b/tensorflow/go/op/wrappers.go
index 385248d403..5bea322c1c 100644
--- a/tensorflow/go/op/wrappers.go
+++ b/tensorflow/go/op/wrappers.go
@@ -18445,12 +18445,32 @@ func ResourceScatterUpdate(scope *Scope, resource tf.Output, indices tf.Output,
return scope.AddOperation(opspec)
}
+// GenerateVocabRemappingAttr is an optional argument to GenerateVocabRemapping.
+type GenerateVocabRemappingAttr func(optionalAttr)
+
+// GenerateVocabRemappingOldVocabSize sets the optional old_vocab_size attribute to value.
+//
+// value: Number of entries in the old vocab file to consider. If -1,
+// use the entire old vocabulary.
+// If not specified, defaults to -1
+//
+// REQUIRES: value >= -1
+func GenerateVocabRemappingOldVocabSize(value int64) GenerateVocabRemappingAttr {
+ return func(m optionalAttr) {
+ m["old_vocab_size"] = value
+ }
+}
+
// Given a path to new and old vocabulary files, returns a remapping Tensor of
//
// length `num_new_vocab`, where `remapping[i]` contains the row number in the old
// vocabulary that corresponds to row `i` in the new vocabulary (starting at line
// `new_vocab_offset` and up to `num_new_vocab` entities), or `-1` if entry `i`
-// in the new vocabulary is not in the old vocabulary. `num_vocab_offset` enables
+// in the new vocabulary is not in the old vocabulary. The old vocabulary is
+// constrained to the first `old_vocab_size` entries if `old_vocab_size` is not the
+// default value of -1.
+//
+// `new_vocab_offset` enables
// use in the partitioned variable case, and should generally be set through
// examining partitioning info. The format of the files should be a text file,
// with each line containing a single entity within the vocabulary.
@@ -18481,11 +18501,14 @@ func ResourceScatterUpdate(scope *Scope, resource tf.Output, indices tf.Output,
// Returns A Tensor of length num_new_vocab where the element at index i
// is equal to the old ID that maps to the new ID i. This element is -1 for any
// new ID that is not found in the old vocabulary. Number of new vocab entries found in old vocab.
-func GenerateVocabRemapping(scope *Scope, new_vocab_file tf.Output, old_vocab_file tf.Output, new_vocab_offset int64, num_new_vocab int64) (remapping tf.Output, num_present tf.Output) {
+func GenerateVocabRemapping(scope *Scope, new_vocab_file tf.Output, old_vocab_file tf.Output, new_vocab_offset int64, num_new_vocab int64, optional ...GenerateVocabRemappingAttr) (remapping tf.Output, num_present tf.Output) {
if scope.Err() != nil {
return
}
attrs := map[string]interface{}{"new_vocab_offset": new_vocab_offset, "num_new_vocab": num_new_vocab}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
Type: "GenerateVocabRemapping",
Input: []tf.Input{
diff --git a/tensorflow/python/BUILD b/tensorflow/python/BUILD
index 02e88f4888..76477384de 100644
--- a/tensorflow/python/BUILD
+++ b/tensorflow/python/BUILD
@@ -130,6 +130,7 @@ py_library(
":pywrap_tensorflow",
":util",
"//tensorflow/core:protos_all_py",
+ "@absl_py//absl/flags",
"@six_archive//:six",
],
)
@@ -1544,6 +1545,7 @@ py_library(
":platform",
":sparse_tensor",
":tensor_array_ops",
+ ":tf_should_use",
":util",
"//tensorflow/core:protos_all_py",
"@six_archive//:six",
@@ -1833,6 +1835,7 @@ py_library(
":control_flow_ops",
":framework_for_generated_wrappers",
":math_ops",
+ ":tf_should_use",
],
)
@@ -2307,7 +2310,7 @@ py_library(
":math_ops",
":tensor_shape",
":tensor_util",
- ":util",
+ ":tf_should_use",
"//tensorflow/python/eager:context",
],
)
@@ -2344,6 +2347,7 @@ py_library(
":math_ops",
":state_ops",
":tensor_shape",
+ ":tf_should_use",
":util",
"//tensorflow/core:protos_all_py",
"//tensorflow/python/eager:context",
@@ -2727,6 +2731,7 @@ py_library(
["util/**/*.py"],
exclude = [
"util/example_parser*",
+ "util/tf_should_use.py",
"util/**/*_test.py",
],
),
@@ -2789,6 +2794,17 @@ py_test(
],
)
+py_library(
+ name = "tf_should_use",
+ srcs = ["util/tf_should_use.py"],
+ srcs_version = "PY2AND3",
+ deps = [
+ ":util",
+ "//tensorflow/python/eager:context",
+ "@six_archive//:six",
+ ],
+)
+
py_test(
name = "tf_should_use_test",
size = "small",
@@ -2796,7 +2812,7 @@ py_test(
srcs_version = "PY2AND3",
deps = [
":client_testlib",
- ":util",
+ ":tf_should_use",
],
)
@@ -4350,8 +4366,16 @@ cuda_py_test(
":client_testlib",
":framework_for_generated_wrappers",
":array_ops",
+ ":constant_op",
+ ":dtypes",
+ ":functional_ops",
+ ":layers",
+ ":math_ops",
":nn",
+ ":ops",
":random_ops",
+ ":tf_optimizer",
+ ":training",
"//third_party/py/numpy",
"//tensorflow/core:protos_all_py",
],
diff --git a/tensorflow/python/eager/BUILD b/tensorflow/python/eager/BUILD
index f5b946ec26..bcd1e1d0dc 100644
--- a/tensorflow/python/eager/BUILD
+++ b/tensorflow/python/eager/BUILD
@@ -3,6 +3,10 @@ licenses(["notice"]) # Apache 2.0
load("//tensorflow:tensorflow.bzl", "py_test")
load("//tensorflow:tensorflow.bzl", "cuda_py_test")
load("//tensorflow:tensorflow.bzl", "tf_cc_binary")
+load(
+ "//tensorflow/tools/test:performance.bzl",
+ "tf_py_logged_benchmark",
+)
cc_library(
name = "pywrap_tfe_lib",
@@ -356,22 +360,26 @@ py_library(
],
)
-py_test(
+cuda_py_test(
name = "benchmarks_test",
srcs = ["benchmarks_test.py"],
- srcs_version = "PY2AND3",
- deps = [
+ additional_deps = [
":backprop",
":context",
":function",
":test",
+ "//third_party/py/numpy",
"//tensorflow/python:math_ops",
"//tensorflow/python:pywrap_tensorflow",
"//tensorflow/python:random_ops",
- "//third_party/py/numpy",
],
)
+tf_py_logged_benchmark(
+ name = "benchmarks",
+ target = "//tensorflow/python/eager:benchmarks_test",
+)
+
py_test(
name = "tape_test",
srcs = ["tape_test.py"],
diff --git a/tensorflow/python/eager/benchmarks_test.py b/tensorflow/python/eager/benchmarks_test.py
index 1a2f99fe9e..26a70a617d 100644
--- a/tensorflow/python/eager/benchmarks_test.py
+++ b/tensorflow/python/eager/benchmarks_test.py
@@ -12,21 +12,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-"""Benchmarks for low-level eager execution primitives.
+r"""Benchmarks for low-level eager execution primitives.
-Packaged as a test to ensure that this code is exercised by continuous
-integration tests. To get numbers:
+To run CPU benchmarks:
+ bazel run -c opt benchmarks_test -- --benchmarks=.
- bazel build -c opt :benchmarks_test &&
- ./bazel-bin/tensorflow/python/eager/benchmarks_test --iters=0
+To run GPU benchmarks:
+ bazel run --config=cuda -c opt --copt="-mavx" benchmarks_test -- \
+ --benchmarks=.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-import argparse
-import contextlib
-import sys
import time
import numpy as np
@@ -39,161 +37,274 @@ from tensorflow.python.eager import function
from tensorflow.python.eager import test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
-from tensorflow.python.framework import test_util
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
-FLAGS = None
-
-
-@contextlib.contextmanager
-def timer(label, iters=30000):
- start = time.time()
- yield xrange(iters)
- end = time.time()
- t = (end - start) * 1e6 / iters
- print("%-40s took %.2fus (%d iterations)" % (label, t, iters))
-
-
-def benchmark_create_tensor(n):
- """Benchmark overheads of creating a Tensor object."""
-
- def label(s):
- return "{:20s}".format(s)
-
- with timer(label("np.array([[3.0]])"), iters=n) as iters:
- for _ in iters:
- np.array([[3.0]])
-
- ctx = context.context()
- handle = ctx._handle
- device = ctx.device_name
- # May be warmup GPU.
- ops.EagerTensor([[3.0]], context=handle, device=device)
-
- # float32
- dtype = dtypes.float32.as_datatype_enum
- three = [[3.0]]
- with timer(label("EagerTensor([[3.0]])"), iters=n) as iters:
- for _ in iters:
- ops.EagerTensor(three, context=handle, device=device, dtype=dtype)
-
- np_3 = np.array([[3.0]], dtype=np.float32)
- with timer(label("EagerTensor(np.array([[3.0]]))"), iters=n) as iters:
- for _ in iters:
- ops.EagerTensor(np_3, context=handle, device=device, dtype=dtype)
-
- # int32.
- # This is interesting since int32 will be kept on host memory for the GPU
- # case.
- dtype = dtypes.int32.as_datatype_enum
- three = [[3]]
- with timer(label("EagerTensor([[3]])"), iters=n) as iters:
- for _ in iters:
- ops.EagerTensor(three, context=handle, device=device, dtype=dtype)
-
- np_3 = np.array([[3]], dtype=np.int32)
- with timer(label("EagerTensor(np.array([[3]]))"), iters=n) as iters:
- for _ in iters:
- ops.EagerTensor(np_3, context=handle, device=device, dtype=dtype)
-
-
-def benchmark_matmul(shape, n, use_gpu=False):
- """Benchmark for matrix multiplication using tf.matmul."""
- transpose_b = (shape[0] != shape[1])
- m = random_ops.random_uniform(shape)
- if use_gpu:
- m = m.gpu()
- # Warm up the GPU - the very first kernel invocation
- # seems to require a bunch of setup.
- math_ops.matmul(m, m, transpose_b=transpose_b)
-
- def label(s):
- return "MatMul {}: {:30s}".format(shape, s)
-
- if not use_gpu:
- a = m.cpu().numpy()
- b = a.T if transpose_b else a
- with timer(label("np.dot"), iters=n) as iters:
- for _ in iters:
- np.dot(a, b)
- with timer(label("tf.matmul"), iters=n) as iters:
- for _ in iters:
- math_ops.matmul(m, m, transpose_b=transpose_b)
+CPU = "/device:CPU:0"
+GPU = "/device:GPU:0"
+
+
+class MicroBenchmarks(test.Benchmark):
+
+ def __init__(self):
+ # used for multiply benchmarks
+ self._m_2 = random_ops.random_uniform([2])
+
+ # used for matmul benchmarks
+ self._m_2_by_2 = random_ops.random_uniform((2, 2))
+ self._m_100_by_784 = random_ops.random_uniform((100, 784))
+ self._num_iters_2_by_2 = 30000
+ self._num_iters_100_by_784 = 1000
+
+ def _run(self, func, num_iters):
+ # call func to maybe warm up the GPU
+ func()
+ start = time.time()
+ for _ in xrange(num_iters):
+ func()
+ end = time.time()
+ mean_us = (end - start) * 1e6 / num_iters
+ self.report_benchmark(iters=num_iters, wall_time=mean_us)
+
+ def benchmark_create_np_array(self):
+ func = lambda: np.array([3.0])
+ self._run(func, 30000)
+
+ def _benchmark_create_tensor(self, value, dtype, device):
+ """Benchmark overheads of creating a Tensor object."""
+ ctx = context.context()
+ handle = ctx._handle
+ if device == GPU:
+      # Warm up the GPU.
+ ops.EagerTensor(value, context=handle, device=device)
+
+ def func():
+ ops.EagerTensor(value, context=handle, device=device, dtype=dtype)
+ self._run(func, 30000)
+
+ def benchmark_create_float_tensor_from_list_CPU(self):
+ self._benchmark_create_tensor([[3.0]], dtypes.float32.as_datatype_enum, CPU)
+
+ def benchmark_create_float_tensor_from_np_array_CPU(self):
+ self._benchmark_create_tensor(
+ np.array([[3.0]], dtype=np.float32), dtypes.float32.as_datatype_enum,
+ CPU)
+
+ def benchmark_create_int32_tensor_from_list_CPU(self):
+ self._benchmark_create_tensor([[3]], dtypes.int32.as_datatype_enum, CPU)
+
+ def benchmark_create_int32_tensor_from_np_array_CPU(self):
+ self._benchmark_create_tensor(
+ np.array([[3]], dtype=np.int32), dtypes.int32.as_datatype_enum, CPU)
+
+ def benchmark_create_float_tensor_from_list_GPU(self):
+ if not context.num_gpus():
+ return
+ self._benchmark_create_tensor([[3.0]], dtypes.float32.as_datatype_enum, GPU)
+
+ def benchmark_create_float_tensor_from_np_array_GPU(self):
+ if not context.num_gpus():
+ return
+ self._benchmark_create_tensor(
+ np.array([[3.0]], dtype=np.float32), dtypes.float32.as_datatype_enum,
+ GPU)
+
+ def benchmark_create_int32_tensor_from_list_GPU(self):
+ # int32's are kept on host memory even when executing on GPU.
+ if not context.num_gpus():
+ return
+ self._benchmark_create_tensor([[3]], dtypes.int32.as_datatype_enum, GPU)
+
+ def benchmark_create_int32_tensor_from_np_array_GPU(self):
+ # int32's are kept on host memory even when executing on GPU.
+ if not context.num_gpus():
+ return
+ self._benchmark_create_tensor(
+ np.array([[3]], dtype=np.int32), dtypes.int32.as_datatype_enum, GPU)
+
+ def _benchmark_np_multiply(self, m, num_iters):
+ a = m.cpu().numpy()
+ func = lambda: a * a
+ self._run(func, num_iters)
- with timer(label("gen_math_ops.mat_mul"), iters=n) as iters:
- for _ in iters:
- gen_math_ops._mat_mul(m, m, transpose_b=transpose_b)
+ def _benchmark_tf_multiply(self, m, num_iters):
+ func = lambda: m * m
+ self._run(func, num_iters)
- inputs = [m, m]
- # pylint: disable=protected-access
- ctx_handle = context.context()._handle
- # pylint: enable=protected-access
- attrs = ("transpose_a", False, "transpose_b", transpose_b, "T",
- m.dtype.as_datatype_enum)
- with timer(label("TFE_Py_Execute"), iters=n) as iters:
- for _ in iters:
- pywrap_tensorflow.TFE_Py_Execute(ctx_handle, None, "MatMul",
- inputs, attrs, 1)
-
- f = function.defun(math_ops.matmul)
- with timer(label("defun(tf.matmul)"), iters=n) as iters:
- for _ in iters:
- f(m, m, transpose_b=transpose_b)
-
-
-def benchmark_multiply(shape, n, use_gpu=False):
- m = random_ops.random_uniform(shape)
- if use_gpu:
- m = m.gpu()
- # Warm up the GPU - the very first kernel invocation
- # seems to require a bunch of setup.
- _ = m * m
-
- def label(s):
- return "Multiply {}: {:30s}".format(shape, s)
-
- if not use_gpu:
- a = m.cpu().numpy()
- with timer(label("np.multiply"), iters=n) as iters:
- for _ in iters:
- _ = a * a
+ def benchmark_np_multiply(self):
+ self._benchmark_np_multiply(self._m_2, 30000)
- with timer(label("tf.multiply"), iters=n) as iters:
- for _ in iters:
- _ = m * m
+ def benchmark_tf_multiply_CPU(self):
+ with context.device(CPU):
+ m = self._m_2.cpu()
+ self._benchmark_tf_multiply(m, 30000)
+ def benchmark_tf_multiply_GPU(self):
+ if not context.num_gpus():
+ return
+ with context.device(GPU):
+ m = self._m_2.gpu()
+ self._benchmark_tf_multiply(m, 30000)
-class BenchmarksTest(test_util.TensorFlowTestCase):
+ def _benchmark_np_matmul(self, m, transpose_b, num_iters):
+ a = m.cpu().numpy()
+ b = a.T if transpose_b else a
+ func = lambda: np.dot(a, b)
+ self._run(func, num_iters)
- def testBenchmarks(self):
- # This isn't actually a test, but benchmarks packaged as a test
- # so that continuous integration runs catch any breakages.
- print(context.context())
- benchmark_create_tensor(FLAGS.iters or 30000)
- benchmark_matmul([2, 2], FLAGS.iters or 30000)
- benchmark_matmul([100, 28 * 28], FLAGS.iters or 1000)
- benchmark_multiply([2], FLAGS.iters or 30000)
+ def _benchmark_tf_matmul(self, m, transpose_b, num_iters):
+ func = lambda: math_ops.matmul(m, m, transpose_b=transpose_b)
+ self._run(func, num_iters)
- if context.context().num_gpus() > 0:
- print("---- RUNNING ON GPU NOW ----")
- with context.device("/device:GPU:0"):
- benchmark_create_tensor(FLAGS.iters or 30000)
- benchmark_matmul([2, 2], FLAGS.iters or 30000, use_gpu=True)
- benchmark_matmul([100, 28 * 28], FLAGS.iters or 1000, use_gpu=True)
- benchmark_multiply([2], FLAGS.iters or 30000, use_gpu=True)
+ def _benchmark_gen_math_ops_matmul(self, m, transpose_b, num_iters):
+ def func():
+ gen_math_ops._mat_mul(m, m, transpose_b=transpose_b)
+ self._run(func, num_iters)
+
+ def _benchmark_tfe_py_execute_matmul(self, m, transpose_b, num_iters):
+ inputs = [m, m]
+ # pylint: disable=protected-access
+ ctx_handle = context.context()._handle
+ # pylint: enable=protected-access
+ attrs = ("transpose_a", False, "transpose_b", transpose_b, "T",
+ m.dtype.as_datatype_enum)
+ def func():
+ pywrap_tensorflow.TFE_Py_Execute(ctx_handle, None, "MatMul", inputs,
+ attrs, 1)
+
+ self._run(func, num_iters)
+
+ def _benchmark_defun_matmul(self, m, transpose_b, num_iters):
+ f = function.defun(math_ops.matmul)
+    func = lambda: f(m, m, transpose_b=transpose_b)
+ self._run(func, num_iters)
+
+ # Benchmarks for A^2, A of dimension 2 by 2.
+ def benchmark_np_matmul_2_by_2(self):
+ self._benchmark_np_matmul(
+ self._m_2_by_2, transpose_b=False, num_iters=self._num_iters_2_by_2)
+
+ def benchmark_tf_matmul_2_by_2_CPU(self):
+ with context.device(CPU):
+ m = self._m_2_by_2.cpu()
+ self._benchmark_tf_matmul(
+ m, transpose_b=False, num_iters=self._num_iters_2_by_2)
+
+ def benchmark_gen_math_ops_matmul_2_by_2_CPU(self):
+ with context.device(CPU):
+ m = self._m_2_by_2.cpu()
+ self._benchmark_gen_math_ops_matmul(
+ m, transpose_b=False, num_iters=self._num_iters_2_by_2)
+
+ def benchmark_tfe_py_execute_matmul_2_by_2_CPU(self):
+ with context.device(CPU):
+ m = self._m_2_by_2.cpu()
+ self._benchmark_tfe_py_execute_matmul(
+ m, transpose_b=False, num_iters=self._num_iters_2_by_2)
+
+ def benchmark_defun_matmul_2_by_2_CPU(self):
+ with context.device(CPU):
+ m = self._m_2_by_2.cpu()
+ self._benchmark_defun_matmul(
+ m, transpose_b=False, num_iters=self._num_iters_2_by_2)
+
+ def benchmark_tf_matmul_2_by_2_GPU(self):
+ if not context.num_gpus():
+ return
+ with context.device(GPU):
+ m = self._m_2_by_2.gpu()
+ self._benchmark_tf_matmul(
+ m, transpose_b=False, num_iters=self._num_iters_2_by_2)
+
+ def benchmark_gen_math_ops_matmul_2_by_2_GPU(self):
+ if not context.num_gpus():
+ return
+ with context.device(GPU):
+ m = self._m_2_by_2.gpu()
+ self._benchmark_gen_math_ops_matmul(
+ m, transpose_b=False, num_iters=self._num_iters_2_by_2)
+
+ def benchmark_tfe_py_execute_matmul_2_by_2_GPU(self):
+ if not context.num_gpus():
+ return
+ with context.device(GPU):
+ m = self._m_2_by_2.gpu()
+ self._benchmark_tfe_py_execute_matmul(
+ m, transpose_b=False, num_iters=self._num_iters_2_by_2)
+
+ def benchmark_defun_matmul_2_by_2_GPU(self):
+ if not context.num_gpus():
+ return
+ with context.device(GPU):
+ m = self._m_2_by_2.gpu()
+ self._benchmark_defun_matmul(
+ m, transpose_b=False, num_iters=self._num_iters_2_by_2)
+
+ # Benchmarks for AA.T, A of dimension 100 by 784.
+ def benchmark_np_matmul_100_by_784(self):
+ self._benchmark_np_matmul(
+ self._m_100_by_784,
+ transpose_b=True,
+ num_iters=self._num_iters_100_by_784)
+
+ def benchmark_tf_matmul_100_by_784_CPU(self):
+ with context.device(CPU):
+ m = self._m_100_by_784.cpu()
+ self._benchmark_tf_matmul(
+ m, transpose_b=True, num_iters=self._num_iters_100_by_784)
+
+ def benchmark_gen_math_ops_matmul_100_by_784_CPU(self):
+ with context.device(CPU):
+ m = self._m_100_by_784.cpu()
+ self._benchmark_gen_math_ops_matmul(
+ m, transpose_b=True, num_iters=self._num_iters_100_by_784)
+
+ def benchmark_tfe_py_execute_matmul_100_by_784_CPU(self):
+ with context.device(CPU):
+ m = self._m_100_by_784.cpu()
+ self._benchmark_tfe_py_execute_matmul(
+ m, transpose_b=True, num_iters=self._num_iters_100_by_784)
+
+ def benchmark_defun_matmul_100_by_784_CPU(self):
+ with context.device(CPU):
+ m = self._m_100_by_784.cpu()
+ self._benchmark_defun_matmul(
+ m, transpose_b=True, num_iters=self._num_iters_100_by_784)
+
+ def benchmark_tf_matmul_100_by_784_GPU(self):
+ if not context.num_gpus():
+ return
+ with context.device(GPU):
+ m = self._m_100_by_784.gpu()
+ self._benchmark_tf_matmul(
+ m, transpose_b=True, num_iters=self._num_iters_100_by_784)
+
+ def benchmark_gen_math_ops_matmul_100_by_784_GPU(self):
+ if not context.num_gpus():
+ return
+ with context.device(GPU):
+ m = self._m_100_by_784.gpu()
+ self._benchmark_gen_math_ops_matmul(
+ m, transpose_b=True, num_iters=self._num_iters_100_by_784)
+
+ def benchmark_tfe_py_execute_matmul_100_by_784_GPU(self):
+ if not context.num_gpus():
+ return
+ with context.device(GPU):
+ m = self._m_100_by_784.gpu()
+ self._benchmark_tfe_py_execute_matmul(
+ m, transpose_b=True, num_iters=self._num_iters_100_by_784)
+
+ def benchmark_defun_matmul_100_by_784_GPU(self):
+ if not context.num_gpus():
+ return
+ with context.device(GPU):
+ m = self._m_100_by_784.gpu()
+ self._benchmark_defun_matmul(
+ m, transpose_b=True, num_iters=self._num_iters_100_by_784)
if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- # Default iterations to 1 to keep continuos integration test times low.
- parser.add_argument(
- "--iters",
- type=int,
- default=1,
- help="Number of iterators for each test. None or 0 for auto-selection")
- FLAGS, unparsed = parser.parse_known_args()
- sys.argv = [sys.argv[0]] + unparsed
test.main()
diff --git a/tensorflow/python/eager/pywrap_tensor.cc b/tensorflow/python/eager/pywrap_tensor.cc
index 4cc8f91dbc..ca283862f9 100644
--- a/tensorflow/python/eager/pywrap_tensor.cc
+++ b/tensorflow/python/eager/pywrap_tensor.cc
@@ -720,8 +720,6 @@ PyObject* TFE_Py_TensorShapeSlice(PyObject* tensor_list, int slice_dim) {
.c_str());
return nullptr;
}
- // handle now owns the tensor. Release it from the smart pointer.
- tensor.release();
return EagerTensorFromHandle(handle);
}
diff --git a/tensorflow/python/estimator/canned/head.py b/tensorflow/python/estimator/canned/head.py
index 88d79de808..5d698b7cc3 100644
--- a/tensorflow/python/estimator/canned/head.py
+++ b/tensorflow/python/estimator/canned/head.py
@@ -323,6 +323,7 @@ def _check_weights_match_logits_and_reshape(weights, logits):
Consider logits of shape [D0, D1, ... DN, logits_dimension]. Weights shape
can be either:
* [D0, D1, ... DN, logits_dimension]
+ * [D0, D1, ... DN, 1]
* [D0, D1, ... DN]: In this case, weights is reshaped into
[D0, D1, ... DN, 1] to work with weight broadcasting rules.
@@ -502,7 +503,20 @@ def _multi_class_head_with_softmax_cross_entropy_loss(n_classes,
name=None):
"""Creates a '_Head' for multi class classification.
- This head expects to be fed integer labels specifying the class index.
+ The head expects `logits` with shape `[D0, D1, ... DN, n_classes]`.
+ In many applications, the shape is `[batch_size, n_classes]`.
+
+ `labels` must be a dense `Tensor` with shape matching `logits`, namely
+  `[D0, D1, ... DN, 1]`. If `label_vocabulary` is given, `labels` must be a
+  string `Tensor` with values from the vocabulary. If `label_vocabulary` is not
+  given, `labels` must be an integer `Tensor` with values specifying the class
+  index.
+
+ If `weight_column` is specified, weights must be of shape
+ `[D0, D1, ... DN]`, or `[D0, D1, ... DN, 1]`.
+
+ The loss is the weighted sum over the input dimensions. Namely, if the input
+ labels have shape `[batch_size, 1]`, the loss is the weighted sum over
+ `batch_size`.
Args:
n_classes: Number of classes, must be greater than 2 (for 2 classes, use
@@ -605,12 +619,18 @@ class _MultiClassHeadWithSoftmaxCrossEntropyLoss(_Head):
def create_loss(self, features, mode, logits, labels):
"""See `Head`."""
del mode # Unused for this head.
- label_ids = self._label_ids(_check_and_reshape_dense_labels(labels, 1))
+ logits = ops.convert_to_tensor(logits)
+ labels = _check_dense_labels_match_logits_and_reshape(
+ labels=labels, logits=logits, expected_labels_dimension=1)
+ label_ids = self._label_ids(labels)
unweighted_loss = losses.sparse_softmax_cross_entropy(
labels=label_ids, logits=logits, reduction=losses.Reduction.NONE)
# Restore the squeezed dim, so unweighted_loss matches the weights shape.
- unweighted_loss = array_ops.expand_dims(unweighted_loss, axis=(1,))
+ unweighted_loss = array_ops.expand_dims(unweighted_loss, axis=-1)
weights = _weights(features, self._weight_column)
+ if self._weight_column is not None:
+ weights = _check_weights_match_logits_and_reshape(
+ weights=weights, logits=logits)
weighted_sum_loss = losses.compute_weighted_loss(
unweighted_loss, weights=weights, reduction=losses.Reduction.SUM)
# _weights() can return 1.
@@ -623,16 +643,32 @@ class _MultiClassHeadWithSoftmaxCrossEntropyLoss(_Head):
def create_estimator_spec(
self, features, mode, logits, labels=None, train_op_fn=None):
- """See `Head`."""
+ """Returns an `EstimatorSpec`.
+
+ Args:
+ features: Input `dict` of `Tensor` or `SparseTensor` objects.
+ mode: Estimator's `ModeKeys`.
+ logits: logits `Tensor` with shape `[D0, D1, ... DN, logits_dimension]`.
+ For many applications, the shape is `[batch_size, logits_dimension]`.
+ labels: Labels integer or string `Tensor` with shape matching `logits`,
+        namely `[D0, D1, ... DN, 1]`. `labels` is a required argument when
+        `mode` equals `TRAIN` or `EVAL`.
+ train_op_fn: Function that takes a scalar loss `Tensor` and returns
+ `train_op`. Required in TRAIN mode.
+ Returns:
+ `EstimatorSpec`.
+ Raises:
+ ValueError: If `train_op_fn` is `None` in TRAIN mode.
+ """
with ops.name_scope(self._name, 'head'):
- logits = _check_logits(logits, self.logits_dimension)
+ logits = _check_logits_final_dim(logits, self.logits_dimension)
# Predict.
pred_keys = prediction_keys.PredictionKeys
with ops.name_scope(None, 'predictions', (logits,)):
- # class_ids's shape is [batch_size]
- class_ids = math_ops.argmax(logits, 1, name=pred_keys.CLASS_IDS)
- class_ids = array_ops.expand_dims(class_ids, axis=(1,))
+ # class_ids's shape is [D0, D1, ... DN].
+ class_ids = math_ops.argmax(logits, axis=-1, name=pred_keys.CLASS_IDS)
+ class_ids = array_ops.expand_dims(class_ids, axis=-1)
if self._label_vocabulary:
table = lookup_ops.index_to_string_table_from_tensor(
vocabulary_list=self._label_vocabulary,
@@ -1031,9 +1067,6 @@ class _RegressionHeadWithMeanSquaredErrorLoss(_Head):
self, features, mode, logits, labels=None, train_op_fn=None):
"""Returns an `EstimatorSpec`.
- Please note that,
- + All args must be passed via name.
-
Args:
features: Input `dict` of `Tensor` or `SparseTensor` objects.
mode: Estimator's `ModeKeys`.
diff --git a/tensorflow/python/estimator/canned/head_test.py b/tensorflow/python/estimator/canned/head_test.py
index 9f95618513..cfd7bc08c7 100644
--- a/tensorflow/python/estimator/canned/head_test.py
+++ b/tensorflow/python/estimator/canned/head_test.py
@@ -155,7 +155,9 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
logits=logits_placeholder,
labels=labels_placeholder)[0]
with self.test_session():
- with self.assertRaisesRegexp(errors.OpError, 'labels shape'):
+ with self.assertRaisesRegexp(
+ errors.InvalidArgumentError,
+ r'\[expected_labels_shape: \] \[2 1\] \[labels_shape: \] \[2 2\]'):
weighted_sum_loss.eval({
logits_placeholder: logits_2x3,
labels_placeholder: labels_2x2
@@ -269,8 +271,8 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
labels=labels_placeholder)[0]
with self.test_session():
with self.assertRaisesRegexp(
- errors.OpError,
- 'logits and labels must have the same first dimension'):
+ errors.InvalidArgumentError,
+ r'\[expected_labels_shape: \] \[2 1\] \[labels_shape: \] \[3 1\]'):
weighted_sum_loss.eval({
labels_placeholder: values_3x1,
logits_placeholder: values_2x3
@@ -897,6 +899,158 @@ class MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):
expected_loss / np.sum(weights_3x1),
}, summary_str, tol)
+ def test_multi_dim_weighted_train_create_loss(self):
+ """Logits of shape [2, 2, 2], labels [2, 2, 1], weights [2, 2]."""
+ head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
+ n_classes=3, weight_column='weights')
+
+ logits = np.array([[[10, 0, 0], [12, 0, 0]],
+ [[0, 10, 0], [0, 15, 0]]], dtype=np.float32)
+ labels = np.array([[[0], [1]], [[1], [2]]], dtype=np.int64)
+ weights = np.array([[1., 1.5], [2., 2.5]], dtype=np.float32)
+
+ # loss = cross_entropy(labels, logits) = [[0, 12], [0, 15]].
+ # weighted_sum_loss = 1*0 + 1.5*12 + 2*0 + 2.5*15 = 55.5
+ expected_weighted_sum_loss = 55.5
+ expected_example_weight_sum = np.sum(weights)
+ weighted_sum_loss, example_weight_sum, _ = head.create_loss(
+ features={'weights': weights},
+ mode=model_fn.ModeKeys.TRAIN,
+ logits=logits,
+ labels=labels)
+ with self.test_session():
+ _initialize_variables(self, monitored_session.Scaffold())
+ self.assertAllClose(
+ expected_weighted_sum_loss, weighted_sum_loss.eval(),
+ rtol=1e-2, atol=1e-2)
+ self.assertAllClose(
+ expected_example_weight_sum, example_weight_sum.eval())
+
+ def test_multi_dim_weighted_train(self):
+ """Logits of shape [2, 2, 2], labels [2, 2, 1], weights [2, 2]."""
+ head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
+ n_classes=3, weight_column='weights')
+
+ logits = np.array([[[10, 0, 0], [12, 0, 0]],
+ [[0, 10, 0], [0, 15, 0]]], dtype=np.float32)
+ labels = np.array([[[0], [1]], [[1], [2]]], dtype=np.int64)
+ weights = np.array([[1., 1.5], [2., 2.5]], dtype=np.float32)
+ expected_train_result = 'my_train_op'
+ def _train_op_fn(loss):
+ return string_ops.string_join(
+ [constant_op.constant(expected_train_result),
+ string_ops.as_string(loss, precision=2)])
+
+ # loss = cross_entropy(labels, logits) = [[0, 12], [0, 15]].
+ # weighted_sum_loss = 1*0 + 1.5*12 + 2*0 + 2.5*15 = 55.5
+ expected_loss = 55.5
+ spec = head.create_estimator_spec(
+ features={'weights': weights},
+ mode=model_fn.ModeKeys.TRAIN,
+ logits=logits,
+ labels=labels,
+ train_op_fn=_train_op_fn)
+
+ # Assert predictions, loss, train_op, and summaries.
+ tol = 1e-2
+ with self.test_session() as sess:
+ _initialize_variables(self, spec.scaffold)
+ loss, train_result = sess.run((spec.loss, spec.train_op))
+ self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
+ self.assertEqual(
+ six.b('{0:s}{1:.2f}'.format(expected_train_result, expected_loss)),
+ train_result)
+
+ def test_multi_dim_train_weights_wrong_inner_dim(self):
+ """Logits of shape [2, 2, 2], labels [2, 2, 1], weights [2, 1]."""
+ head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
+ n_classes=3, weight_column='weights')
+ logits = np.array([[[10, 0, 0], [12, 0, 0]],
+ [[0, 10, 0], [0, 15, 0]]], dtype=np.float32)
+ labels = np.array([[[0], [1]], [[1], [2]]], dtype=np.int64)
+ weights = np.array([[1.], [2.]], dtype=np.float32)
+ def _no_op_train_fn(loss):
+ del loss
+ return control_flow_ops.no_op()
+
+ spec = head.create_estimator_spec(
+ features={'weights': weights},
+ mode=model_fn.ModeKeys.TRAIN,
+ logits=logits,
+ labels=labels,
+ train_op_fn=_no_op_train_fn)
+ with self.test_session():
+ _initialize_variables(self, monitored_session.Scaffold())
+ with self.assertRaisesRegexp(
+ errors.InvalidArgumentError,
+ r'\[logits_shape: \] \[2 2 3\] \[weights_shape: \] \[2 1\]'):
+ spec.loss.eval()
+
+ def test_multi_dim_train_weights_wrong_outer_dim(self):
+ """Logits of shape [2, 2, 2], labels [2, 2, 1], weights [2, 2, 2]."""
+ head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
+ n_classes=3, weight_column='weights')
+ logits = np.array([[[10, 0, 0], [12, 0, 0]],
+ [[0, 10, 0], [0, 15, 0]]], dtype=np.float32)
+ labels = np.array([[[0], [1]], [[1], [2]]], dtype=np.int64)
+ weights_placeholder = array_ops.placeholder(dtype=dtypes.float32)
+ def _no_op_train_fn(loss):
+ del loss
+ return control_flow_ops.no_op()
+
+ spec = head.create_estimator_spec(
+ features={'weights': weights_placeholder},
+ mode=model_fn.ModeKeys.TRAIN,
+ logits=logits,
+ labels=labels,
+ train_op_fn=_no_op_train_fn)
+ with self.test_session():
+ _initialize_variables(self, monitored_session.Scaffold())
+ with self.assertRaisesRegexp(
+ errors.InvalidArgumentError,
+ r'\[logits_shape: \]\s\[2 2 3\]\s\[weights_shape: \]\s\[2 2 2\]'):
+ spec.loss.eval({
+ weights_placeholder: np.array([[[1., 1.1], [1.5, 1.6]],
+ [[2., 2.1], [2.5, 2.6]]])})
+
+ def test_multi_dim_weighted_eval(self):
+ """Logits of shape [2, 2, 2], labels [2, 2, 1], weights [2, 2]."""
+ head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
+ n_classes=3, weight_column='weights')
+ logits = np.array([[[10, 0, 0], [12, 0, 0]],
+ [[0, 10, 0], [0, 15, 0]]], dtype=np.float32)
+ labels = np.array([[[0], [1]], [[1], [2]]], dtype=np.int64)
+ weights = np.array([[1., 1.5], [2., 2.5]], dtype=np.float32)
+ # loss = cross_entropy(labels, logits) = [[0, 12], [0, 15]].
+ # weighted_sum_loss = 1*0 + 1.5*12 + 2*0 + 2.5*15 = 55.5
+ expected_loss = 55.5
+ # Create estimator spec.
+ spec = head.create_estimator_spec(
+ features={'weights': weights},
+ mode=model_fn.ModeKeys.EVAL,
+ logits=logits,
+ labels=labels)
+
+ keys = metric_keys.MetricKeys
+ expected_metrics = {
+ keys.LOSS_MEAN: expected_loss / np.sum(weights),
+ keys.ACCURACY: (1.*1. + 1.5*0. + 2.*1. + 2.5*0.) / np.sum(weights),
+ }
+
+ # Assert predictions, loss, and metrics.
+ tol = 1e-2
+ with self.test_session() as sess:
+ _initialize_variables(self, spec.scaffold)
+ value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
+ update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
+ loss, metrics = sess.run((spec.loss, update_ops))
+ self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
+ # Check results of both update (in `metrics`) and value ops.
+ self.assertAllClose(expected_metrics, metrics, rtol=tol, atol=tol)
+ self.assertAllClose(
+ expected_metrics, {k: value_ops[k].eval() for k in value_ops},
+ rtol=tol, atol=tol)
+
class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):
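The expected loss of 55.5 used throughout the multi-dimensional tests above can be reproduced with a plain NumPy sketch (assuming sparse softmax cross-entropy on integer labels, as the head computes):

    import numpy as np

    def sparse_softmax_xent(logits, labels):
      # Per-example cross-entropy, computed by hand.
      shifted = logits - logits.max(axis=-1, keepdims=True)
      log_probs = shifted - np.log(np.exp(shifted).sum(axis=-1, keepdims=True))
      return -np.take_along_axis(log_probs, labels, axis=-1).squeeze(-1)

    logits = np.array([[[10., 0., 0.], [12., 0., 0.]],
                       [[0., 10., 0.], [0., 15., 0.]]])
    labels = np.array([[[0], [1]], [[1], [2]]])
    weights = np.array([[1., 1.5], [2., 2.5]])
    losses = sparse_softmax_xent(logits, labels)  # ~[[0, 12], [0, 15]]
    print((losses * weights).sum())               # ~55.5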
diff --git a/tensorflow/python/estimator/warm_starting_util.py b/tensorflow/python/estimator/warm_starting_util.py
index 3f0218af83..e5655db082 100644
--- a/tensorflow/python/estimator/warm_starting_util.py
+++ b/tensorflow/python/estimator/warm_starting_util.py
@@ -46,10 +46,13 @@ class _WarmStartSettings(
ckpt_to_initialize_from: [Required] A string specifying the directory with
checkpoint file(s) or path to checkpoint from which to warm-start the
model parameters.
- col_to_prev_vocab: [Optional] Dict of `FeatureColumn` to path of the
- vocabulary used for the `FeatureColumn` in `ckpt_to_initialize_from`. If
- not explicitly provided, the vocabularies are assumed to be same between
- previous and present checkpoints.
+ col_to_prev_vocab: [Optional] Dict of `FeatureColumn` to vocabularies used
+ for the `FeatureColumn` in `ckpt_to_initialize_from`. Vocabularies can
+ be represented either by a string (path to the vocabulary) or by a tuple
+ of (string, int), representing (path of the vocabulary, vocab_size) if
+ only `vocab_size` entries of the old vocabulary were used in the
+ checkpoint. If the dict is not explicitly provided, the vocabularies are
+ assumed to be the same between previous and present checkpoints.
col_to_prev_tensor: [Optional] Dict of `FeatureColumn` to name of the
variable (corresponding to the `FeatureColumn`) in
`ckpt_to_initialize_from`. If not explicitly provided, the name of the
@@ -76,6 +79,13 @@ class _WarmStartSettings(
col_to_prev_vocab={sc_vocab_file: "old_vocab.txt"})
# Warm-start all weights but the parameters corresponding to "sc_vocab_file"
+ # have a different vocab from the one used in the current checkpoint, and
+ # only 100 of those entries were used.
+ ws = _WarmStartSettings(ckpt_to_initialize_from="/tmp",
+ col_to_prev_vocab={sc_vocab_file:
+ ("old_vocab.txt", 100)})
+
+ # Warm-start all weights but the parameters corresponding to "sc_vocab_file"
# have a different vocab from the one used in current checkpoint and the
# parameters corresponding to "sc_vocab_list" have a different name from the
# current checkpoint.
@@ -214,6 +224,7 @@ def _warmstart_var_with_vocab(var,
current_vocab_size,
prev_ckpt,
prev_vocab_path,
+ previous_vocab_size=-1,
current_oov_buckets=0,
prev_tensor_name=None,
initializer=None):
@@ -239,6 +250,8 @@ def _warmstart_var_with_vocab(var,
to checkpoint. The given checkpoint must have tensor with name
`prev_tensor_name` (if not None) or tensor with name same as given `var`.
prev_vocab_path: Path to the vocab file used for the tensor in `prev_ckpt`.
+ previous_vocab_size: If provided, constrains the previous vocab to its
+ first `previous_vocab_size` entries. -1 means use the entire previous
+ vocab.
current_oov_buckets: An `int` specifying the number of out-of-vocabulary
buckets used for given `var`.
prev_tensor_name: Name of the tensor to lookup in provided `prev_ckpt`. If
@@ -284,6 +297,7 @@ def _warmstart_var_with_vocab(var,
old_tensor_name=prev_tensor_name,
new_row_vocab_size=current_vocab_size,
new_col_vocab_size=v_shape[1],
+ old_row_vocab_size=previous_vocab_size,
old_row_vocab_file=prev_vocab_path,
new_row_vocab_file=current_vocab_path,
old_col_vocab_file=None,
@@ -373,17 +387,30 @@ def _warmstart_input_layer(cols_to_vars, warmstart_settings):
vocabulary_file = col.vocabulary_file
vocabulary_size = col.vocabulary_size
num_oov_buckets = col.num_oov_buckets
- prev_vocab_path = warmstart_settings.col_to_prev_vocab.get(
+ prev_vocab = warmstart_settings.col_to_prev_vocab.get(
col, vocabulary_file)
- logging.info("Warm-starting column: {}; prev_vocab: {}; prev_tensor: {}".
- format(col.name, prev_vocab_path, (
- prev_tensor_name or "Unchanged")))
+ if isinstance(prev_vocab, str):
+ prev_vocab_path = prev_vocab
+ previous_vocab_size = -1
+ logging.info(
+ "Warm-starting column: {}; prev_vocab: {}; "
+ "prev_tensor: {}".format(col.name, prev_vocab_path,
+ (prev_tensor_name or "Unchanged")))
+ elif isinstance(prev_vocab, tuple):
+ prev_vocab_path = prev_vocab[0]
+ previous_vocab_size = prev_vocab[1]
+ logging.info("Warm-starting column: {}; prev_vocab: {} (first {} "
+ "entries); prev_tensor: {}".format(
+ col.name, prev_vocab_path, previous_vocab_size,
+ (prev_tensor_name or "Unchanged")))
+
_warmstart_var_with_vocab(
var,
current_vocab_path=vocabulary_file,
current_vocab_size=vocabulary_size,
prev_ckpt=warmstart_settings.ckpt_to_initialize_from,
prev_vocab_path=prev_vocab_path,
+ previous_vocab_size=previous_vocab_size,
current_oov_buckets=num_oov_buckets,
prev_tensor_name=prev_tensor_name,
initializer=initializer)
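A minimal sketch of the dispatch the new warm-starting code performs (path and size hypothetical): a plain string keeps the old behavior, while a (path, size) tuple truncates the old vocabulary.

    prev_vocab = "old_vocab.txt"         # use the entire old vocabulary
    prev_vocab = ("old_vocab.txt", 100)  # use only its first 100 entries

    if isinstance(prev_vocab, str):
      prev_vocab_path, previous_vocab_size = prev_vocab, -1
    elif isinstance(prev_vocab, tuple):
      prev_vocab_path, previous_vocab_size = prev_vocab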
diff --git a/tensorflow/python/estimator/warm_starting_util_test.py b/tensorflow/python/estimator/warm_starting_util_test.py
index f488957fb4..a05dbfd744 100644
--- a/tensorflow/python/estimator/warm_starting_util_test.py
+++ b/tensorflow/python/estimator/warm_starting_util_test.py
@@ -318,6 +318,32 @@ class WarmStartingUtilTest(test.TestCase):
self.assertAllEqual([[2.], [1.5], [1.], [0.5], [0.]],
fruit_weights.eval(sess))
+ def testWarmStartVarWithVocabConstrainedOldVocabSize(self):
+ prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
+ "old_vocab")
+ _, _ = self._create_prev_run_var(
+ "fruit_weights", initializer=[[0.5], [1.], [1.5], [2.]])
+
+ # New vocab with elements in reverse order and one new element.
+ new_vocab_path = self._write_vocab(
+ ["orange", "guava", "banana", "apple", "raspberry"], "new_vocab")
+ # New session and new graph.
+ with ops.Graph().as_default() as g:
+ with self.test_session(graph=g) as sess:
+ fruit_weights = variable_scope.get_variable(
+ "fruit_weights", initializer=[[0.], [0.], [0.], [0.], [0.]])
+ ws_util._warmstart_var_with_vocab(
+ fruit_weights,
+ new_vocab_path,
+ 5,
+ self.get_temp_dir(),
+ prev_vocab_path,
+ previous_vocab_size=2)
+ sess.run(variables.global_variables_initializer())
+ # Old vocabulary limited to ['apple', 'banana'].
+ self.assertAllEqual([[0.], [0.], [1.], [0.5], [0.]],
+ fruit_weights.eval(sess))
+
def testWarmStartVarWithVocabPrevVarPartitioned(self):
prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"old_vocab")
@@ -507,6 +533,51 @@ class WarmStartingUtilTest(test.TestCase):
self._assert_cols_to_vars(cols_to_vars, {sc_vocab: [prev_vocab_val]},
sess)
+ def testWarmStartInputLayer_SparseColumnVocabularyConstrainedVocabSizes(self):
+ # Create old vocabulary, and use a size smaller than the total number of
+ # entries.
+ old_vocab_path = self._write_vocab(["apple", "guava", "banana"],
+ "old_vocab")
+ old_vocab_size = 2 # ['apple', 'guava']
+
+ # Create new vocab for sparse column "sc_vocab".
+ current_vocab_path = self._write_vocab(
+ ["apple", "banana", "guava", "orange"], "current_vocab")
+ # Create feature column. Only use 2 of the actual entries, resulting in
+ # ['apple', 'banana'] for the new vocabulary.
+ sc_vocab = fc.categorical_column_with_vocabulary_file(
+ "sc_vocab", vocabulary_file=current_vocab_path, vocabulary_size=2)
+
+ # Save checkpoint from which to warm-start.
+ self._create_prev_run_var(
+ "linear_model/sc_vocab/weights", shape=[2, 1], initializer=ones())
+
+ partitioner = lambda shape, dtype: [1] * len(shape)
+ # New graph, new session WITHOUT warmstarting.
+ with ops.Graph().as_default() as g:
+ with self.test_session(graph=g) as sess:
+ cols_to_vars = self._create_linear_model([sc_vocab], partitioner)
+ sess.run(variables.global_variables_initializer())
+ # Without warmstarting, the weights should be initialized using default
+ # initializer (which is init_ops.zeros_initializer).
+ self._assert_cols_to_vars(cols_to_vars, {sc_vocab: [np.zeros([2, 1])]},
+ sess)
+
+ # New graph, new session with warmstarting.
+ with ops.Graph().as_default() as g:
+ with self.test_session(graph=g) as sess:
+ cols_to_vars = self._create_linear_model([sc_vocab], partitioner)
+ warmstart_settings = ws_util._WarmStartSettings(
+ ckpt_to_initialize_from=self.get_temp_dir(),
+ col_to_prev_vocab={
+ sc_vocab: (old_vocab_path, old_vocab_size)
+ })
+ ws_util._warmstart_input_layer(cols_to_vars, warmstart_settings)
+ sess.run(variables.global_variables_initializer())
+ # Verify weights were correctly warmstarted. 'banana' isn't in the
+ # first two entries of the old vocabulary, so it's newly initialized.
+ self._assert_cols_to_vars(cols_to_vars, {sc_vocab: [[[1], [0]]]}, sess)
+
def testWarmStartInputLayer_BucketizedColumn(self):
# Create feature column.
real = fc.numeric_column("real")
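The remapping semantics these tests pin down can be sketched in pure Python (a hypothetical helper, not the actual remapper, which runs as a checkpoint-loading op): rows for new-vocab entries found in the truncated old vocab are copied over; all other rows keep their initializer value.

    def remap_rows(new_vocab, old_vocab, old_weights,
                   previous_vocab_size, init=0.):
      old = {w: i for i, w in enumerate(old_vocab[:previous_vocab_size])}
      return [old_weights[old[w]] if w in old else init for w in new_vocab]

    old_vocab = ["apple", "banana", "guava", "orange"]
    old_weights = [0.5, 1., 1.5, 2.]
    new_vocab = ["orange", "guava", "banana", "apple", "raspberry"]
    print(remap_rows(new_vocab, old_vocab, old_weights, 2))
    # -> [0.0, 0.0, 1.0, 0.5, 0.0], the expected fruit_weights above.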
diff --git a/tensorflow/python/framework/c_api_util.py b/tensorflow/python/framework/c_api_util.py
index ddababd5b8..1d0dd88dc5 100644
--- a/tensorflow/python/framework/c_api_util.py
+++ b/tensorflow/python/framework/c_api_util.py
@@ -20,6 +20,7 @@ from __future__ import division
from __future__ import print_function
from tensorflow.python import pywrap_tensorflow as c_api
+from tensorflow.python.util import compat
from tensorflow.python.util import tf_contextlib
@@ -49,22 +50,46 @@ class ScopedTFGraph(object):
c_api.TF_DeleteGraph(self.graph)
+class ScopedTFImportGraphDefOptions(object):
+ """Wrapper around TF_ImportGraphDefOptions that handles deletion."""
+
+ def __init__(self):
+ self.options = c_api.TF_NewImportGraphDefOptions()
+
+ def __del__(self):
+ # Note: when the global context is being destroyed (i.e. when the process
+ # is terminating), other modules may already have been deleted.
+ if c_api.TF_DeleteImportGraphDefOptions is not None:
+ c_api.TF_DeleteImportGraphDefOptions(self.options)
+
+
@tf_contextlib.contextmanager
-def tf_buffer():
+def tf_buffer(data=None):
"""Context manager that creates and deletes TF_Buffer.
Example usage:
- wtih tf_buffer() as buf:
+ with tf_buffer() as buf:
# get serialized graph def into buf
...
proto_data = c_api.TF_GetBuffer(buf)
graph_def.ParseFromString(compat.as_bytes(proto_data))
# buf has been deleted
+ with tf_buffer(some_string) as buf:
+ c_api.TF_SomeFunction(buf)
+ # buf has been deleted
+
+ Args:
+ data: An optional `bytes`, `str`, or `unicode` object. If not None, the
+ yielded buffer will contain this data.
+
Yields:
Created TF_Buffer
"""
- buf = c_api.TF_NewBuffer()
+ if data:
+ buf = c_api.TF_NewBufferFromString(compat.as_bytes(data))
+ else:
+ buf = c_api.TF_NewBuffer()
try:
yield buf
finally:
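Both uses of the extended tf_buffer, sketched (assuming a GraphDef `graph_def` is in scope; the elided TF_* calls stand for whatever C API function produces or consumes the buffer):

    from tensorflow.python import pywrap_tensorflow as c_api
    from tensorflow.python.framework import c_api_util

    # Read mode: a C API call fills the buffer, then we read it back.
    with c_api_util.tf_buffer() as buf:
      ...  # TF_* call that writes serialized bytes into buf
      data = c_api.TF_GetBuffer(buf)

    # Write mode (new): seed the buffer with serialized bytes up front.
    with c_api_util.tf_buffer(graph_def.SerializeToString()) as buf:
      ...  # TF_* call that consumes buf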
diff --git a/tensorflow/python/framework/graph_util_impl.py b/tensorflow/python/framework/graph_util_impl.py
index ce85747d7c..6c7b455388 100644
--- a/tensorflow/python/framework/graph_util_impl.py
+++ b/tensorflow/python/framework/graph_util_impl.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-
"""Helpers to manipulate a tensor graph in python.
"""
@@ -108,6 +107,46 @@ def _node_name(n):
return n.split(":")[0]
+def _extract_graph_summary(graph_def):
+ """Extracts useful information from the graph and returns them."""
+ name_to_input_name = {} # Keyed by the dest node name.
+ name_to_node = {} # Keyed by node name.
+
+ # Keeps track of node sequences. It is important to still output the
+ # operations in the original order.
+ name_to_seq_num = {} # Keyed by node name.
+ seq = 0
+ for node in graph_def.node:
+ n = _node_name(node.name)
+ name_to_node[n] = node
+ name_to_input_name[n] = [_node_name(x) for x in node.input]
+ name_to_seq_num[n] = seq
+ seq += 1
+ return name_to_input_name, name_to_node, name_to_seq_num
+
+
+def _assert_nodes_are_present(name_to_node, nodes):
+ """Assert that nodes are present in the graph."""
+ for d in nodes:
+ assert d in name_to_node, "%s is not in graph" % d
+
+
+def _bfs_for_reachable_nodes(target_nodes, name_to_input_name):
+ """Breadth first search for reachable nodes from target nodes."""
+ nodes_to_keep = set()
+ # Breadth first search to find all the nodes that we should keep.
+ next_to_visit = target_nodes[:]
+ while next_to_visit:
+ n = next_to_visit[0]
+ del next_to_visit[0]
+ if n in nodes_to_keep:
+ # Already visited this node.
+ continue
+ nodes_to_keep.add(n)
+ next_to_visit += name_to_input_name[n]
+ return nodes_to_keep
+
+
def extract_sub_graph(graph_def, dest_nodes):
"""Extract the subgraph that can reach any of the nodes in 'dest_nodes'.
@@ -127,40 +166,18 @@ def extract_sub_graph(graph_def, dest_nodes):
if isinstance(dest_nodes, six.string_types):
raise TypeError("dest_nodes must be a list.")
- edges = {} # Keyed by the dest node name.
- name_to_node_map = {} # Keyed by node name.
-
- # Keeps track of node sequences. It is important to still output the
- # operations in the original order.
- node_seq = {} # Keyed by node name.
- seq = 0
- for node in graph_def.node:
- n = _node_name(node.name)
- name_to_node_map[n] = node
- edges[n] = [_node_name(x) for x in node.input]
- node_seq[n] = seq
- seq += 1
-
- for d in dest_nodes:
- assert d in name_to_node_map, "%s is not in graph" % d
+ name_to_input_name, name_to_node, name_to_seq_num = _extract_graph_summary(
+ graph_def)
+ _assert_nodes_are_present(name_to_node, dest_nodes)
- nodes_to_keep = set()
- # Breadth first search to find all the nodes that we should keep.
- next_to_visit = dest_nodes[:]
- while next_to_visit:
- n = next_to_visit[0]
- del next_to_visit[0]
- if n in nodes_to_keep:
- # Already visited this node.
- continue
- nodes_to_keep.add(n)
- next_to_visit += edges[n]
+ nodes_to_keep = _bfs_for_reachable_nodes(dest_nodes, name_to_input_name)
- nodes_to_keep_list = sorted(list(nodes_to_keep), key=lambda n: node_seq[n])
+ nodes_to_keep_list = sorted(
+ list(nodes_to_keep), key=lambda n: name_to_seq_num[n])
# Now construct the output GraphDef
out = graph_pb2.GraphDef()
for n in nodes_to_keep_list:
- out.node.extend([copy.deepcopy(name_to_node_map[n])])
+ out.node.extend([copy.deepcopy(name_to_node[n])])
out.library.CopyFrom(graph_def.library)
out.versions.CopyFrom(graph_def.versions)
@@ -181,7 +198,9 @@ def tensor_shape_from_node_def_name(graph, input_name):
return shape
-def convert_variables_to_constants(sess, input_graph_def, output_node_names,
+def convert_variables_to_constants(sess,
+ input_graph_def,
+ output_node_names,
variable_names_whitelist=None,
variable_names_blacklist=None):
"""Replaces all the variables in a graph with constants of the same values.
@@ -237,10 +256,10 @@ def convert_variables_to_constants(sess, input_graph_def, output_node_names,
dtype = input_node.attr["dtype"]
data = found_variables[input_node.name]
output_node.attr["dtype"].CopyFrom(dtype)
- output_node.attr["value"].CopyFrom(attr_value_pb2.AttrValue(
- tensor=tensor_util.make_tensor_proto(data,
- dtype=dtype.type,
- shape=data.shape)))
+ output_node.attr["value"].CopyFrom(
+ attr_value_pb2.AttrValue(
+ tensor=tensor_util.make_tensor_proto(
+ data, dtype=dtype.type, shape=data.shape)))
how_many_converted += 1
else:
output_node.CopyFrom(input_node)
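The extracted helpers compose as follows; a standalone sketch with a toy graph (hypothetical node names) shows the breadth-first pruning that extract_sub_graph now delegates to _bfs_for_reachable_nodes:

    from collections import deque

    # As built by _extract_graph_summary: each node mapped to its inputs.
    name_to_input_name = {"A": [], "B": ["A"], "C": ["A", "B"], "D": []}

    def bfs_reachable(targets, name_to_input_name):
      keep, queue = set(), deque(targets)
      while queue:
        n = queue.popleft()
        if n in keep:
          continue
        keep.add(n)
        queue.extend(name_to_input_name[n])
      return keep

    print(bfs_reachable(["C"], name_to_input_name))  # {'A', 'B', 'C'}; D pruned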
diff --git a/tensorflow/python/framework/importer.py b/tensorflow/python/framework/importer.py
index c6b335e661..e4b94e1a34 100644
--- a/tensorflow/python/framework/importer.py
+++ b/tensorflow/python/framework/importer.py
@@ -25,8 +25,11 @@ import copy
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import types_pb2
+from tensorflow.python import pywrap_tensorflow as c_api
+from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
@@ -242,12 +245,6 @@ def import_graph_def(graph_def, input_map=None, return_elements=None,
input_map = _ProcessInputMapParam(input_map)
return_elements = _ProcessReturnElementsParam(return_elements)
- # Use a canonical representation for all tensor names.
- input_map = {_CanonicalInputName(k): v for k, v in input_map.items()}
- used_input_keys = set()
-
- name_to_op = {}
-
op_dict = op_def_registry.get_registered_ops()
if producer_op_list is None:
@@ -255,10 +252,28 @@ def import_graph_def(graph_def, input_map=None, return_elements=None,
else:
producer_op_dict = {op.name: op for op in producer_op_list.op}
- g = ops.get_default_graph()
- if g._c_graph: # pylint: disable=protected-access
- assert 'import_graph_def not yet implemented with C API'
+ graph = ops.get_default_graph()
+
+ if graph._c_graph: # pylint: disable=protected-access
+ scoped_options = c_api_util.ScopedTFImportGraphDefOptions()
+
+ with errors.raise_exception_on_not_ok_status() as status:
+ with c_api_util.tf_buffer(graph_def.SerializeToString()) as serialized:
+ c_api.TF_GraphImportGraphDefWithResults(
+ graph._c_graph, serialized, scoped_options.options, status) # pylint: disable=protected-access
+
+ if return_elements is not None:
+ raise ValueError('return_elements not yet implemented with C API')
+ return None
+
else:
+ g = graph
+
+ # Use a canonical representation for all tensor names.
+ input_map = {_CanonicalInputName(k): v for k, v in input_map.items()}
+ used_input_keys = set()
+ name_to_op = {}
+
# Add any functions defined in `graph_def` to `g`
if graph_def.library and graph_def.library.function:
# Copy op_dict so we don't clobber the original
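Under the C API path added above, only the most basic import is wired up so far; a minimal call (sketch, assuming a populated GraphDef) returns None, and asking for return_elements raises ValueError until that part is implemented:

    from tensorflow.core.framework import graph_pb2
    from tensorflow.python.framework import importer

    graph_def = graph_pb2.GraphDef()
    # ... populate graph_def, e.g. graph_def.ParseFromString(serialized_bytes)
    importer.import_graph_def(graph_def)  # returns None with the C API enabled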
diff --git a/tensorflow/python/framework/importer_test.py b/tensorflow/python/framework/importer_test.py
index e447f9a3e8..d27ec1e30c 100644
--- a/tensorflow/python/framework/importer_test.py
+++ b/tensorflow/python/framework/importer_test.py
@@ -31,6 +31,7 @@ from tensorflow.python.framework import function
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops # pylint: disable=unused-import
+from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
@@ -55,6 +56,28 @@ class ImportGraphDefTest(test.TestCase):
text_format.Merge(text, ret)
return ret
+ # The C API doesn't currently support return elements (or anything else beyond
+ # the most basic import). This test only checks that the import can run
+ # without error, and will be removed once more functionality is implemented
+ # and we can get coverage from the other tests.
+ @test_util.enable_c_api
+ def testCApi(self):
+ importer.import_graph_def(
+ self._MakeGraphDef("""
+ node { name: 'A' op: 'IntOutputFloatOutput' }
+ node { name: 'B' op: 'ListOutput'
+ attr { key: 'T'
+ value { list { type: DT_INT32 type: DT_FLOAT } } } }
+ node { name: 'C' op: 'ListInput'
+ attr { key: 'N' value { i: 2 } }
+ attr { key: 'T' value { type: DT_INT32 } }
+ input: 'A:0' input: 'B:0' }
+ node { name: 'D' op: 'ListInput'
+ attr { key: 'N' value { i: 2 } }
+ attr { key: 'T' value { type: DT_FLOAT } }
+ input: 'A:1' input: 'B:1' }
+ """))
+
def testBasic(self):
with ops.Graph().as_default():
a, b, c, d = importer.import_graph_def(
@@ -108,6 +131,94 @@ class ImportGraphDefTest(test.TestCase):
# Check that the op_def is still available.
self.assertNotEqual(None, a.op_def)
+ def testMultipleImport(self):
+ graph_def = self._MakeGraphDef("""
+ node { name: 'A' op: 'IntOutput' }
+ node { name: 'B' op: 'IntInput' input: 'A:0' }
+ """)
+
+ with ops.Graph().as_default():
+ # Initial import
+ a, b = importer.import_graph_def(
+ graph_def,
+ return_elements=["A", "B"],
+ name="")
+ self.assertEqual(a.name, "A")
+ self.assertEqual(b.name, "B")
+ self.assertEqual(list(b.inputs), [a.outputs[0]])
+
+ # Repeat the same import
+ a1, b1 = importer.import_graph_def(
+ graph_def,
+ return_elements=["A", "B"],
+ name="")
+ self.assertEqual(a1.name, "A_1")
+ self.assertEqual(b1.name, "B_1")
+ self.assertEqual(list(b1.inputs), [a1.outputs[0]])
+
+ # Repeat the same import again
+ a2, b2 = importer.import_graph_def(
+ graph_def,
+ return_elements=["A", "B"],
+ name="")
+ self.assertEqual(a2.name, "A_2")
+ self.assertEqual(b2.name, "B_2")
+ self.assertEqual(list(b2.inputs), [a2.outputs[0]])
+
+ # Import with an already-used name
+ a3, b3 = importer.import_graph_def(
+ graph_def,
+ return_elements=["A", "B"],
+ name="A")
+ self.assertEqual(a3.name, "A_3/A")
+ self.assertEqual(b3.name, "A_3/B")
+ self.assertEqual(list(b3.inputs), [a3.outputs[0]])
+
+ # Import with existing de-duped node names
+ a4, b4 = importer.import_graph_def(
+ self._MakeGraphDef("""
+ node { name: 'A_1' op: 'IntOutput' }
+ node { name: 'B_1' op: 'IntInput' input: 'A_1:0' }
+ """),
+ return_elements=["A_1", "B_1"],
+ name="")
+ self.assertEqual(a4.name, "A_1_1")
+ self.assertEqual(b4.name, "B_1_1")
+ self.assertEqual(list(b4.inputs), [a4.outputs[0]])
+
+ # Create a name scope and then import node with same name
+ with ops.name_scope("foo"):
+ constant_op.constant(1)
+ foo, = importer.import_graph_def(
+ self._MakeGraphDef("node { name: 'foo' op: 'IntOutput' }"),
+ return_elements=["foo"],
+ name="")
+ self.assertEqual(foo.name, "foo_1")
+
+ # Imported node name can't conflict with intermediate name scope (but can
+ # conflict with outer scope and full name scope)
+ with ops.name_scope("outer"):
+ with ops.name_scope("inner"):
+ c = constant_op.constant(1, name="c")
+ self.assertEqual(c.op.name, "outer/inner/c")
+
+ outer, inner, new_c, outer_inner, outer_inner_c = (
+ importer.import_graph_def(
+ self._MakeGraphDef(
+ "node { name: 'outer' op: 'IntOutput' }"
+ "node { name: 'inner' op: 'IntOutput' }"
+ "node { name: 'c' op: 'IntOutput' }"
+ "node { name: 'outer/inner' op: 'IntOutput' }"
+ "node { name: 'outer/inner/c' op: 'IntOutput' }"),
+ return_elements=["outer", "inner", "c", "outer/inner",
+ "outer/inner/c"],
+ name=""))
+ self.assertEqual(outer.name, "outer_1")
+ self.assertEqual(inner.name, "inner")
+ self.assertEqual(new_c.name, "c")
+ self.assertEqual(outer_inner.name, "outer/inner_1")
+ self.assertEqual(outer_inner_c.name, "outer/inner/c_1")
+
def testInputMap(self):
with ops.Graph().as_default():
feed_a_0 = constant_op.constant(0, dtype=dtypes.int32)
diff --git a/tensorflow/python/framework/ops.py b/tensorflow/python/framework/ops.py
index 95274374ad..6469aca3ec 100644
--- a/tensorflow/python/framework/ops.py
+++ b/tensorflow/python/framework/ops.py
@@ -2952,7 +2952,11 @@ class Graph(object):
if previous._hash_str == function._hash_str:
return
else:
- raise ValueError("Another function is already defined with that name")
+ raise ValueError("Cannot add function (%s, hash %s) to graph (%s). "
+ "Another function (%s, hash %s) is already defined "
+ "with that name (%s)" % (
+ function, function._hash_str, self,
+ previous, previous._hash_str, name))
# pylint: enable=protected-access
self._functions[name] = function
diff --git a/tensorflow/python/grappler/layout_optimizer_test.py b/tensorflow/python/grappler/layout_optimizer_test.py
index bda9502cd1..87f07c4a52 100644
--- a/tensorflow/python/grappler/layout_optimizer_test.py
+++ b/tensorflow/python/grappler/layout_optimizer_test.py
@@ -23,12 +23,18 @@ from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
+from tensorflow.python.grappler import tf_optimizer
+from tensorflow.python.layers import convolutional as conv_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
+from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
+from tensorflow.python.training import gradient_descent
+from tensorflow.python.training import saver
def weight(shape):
@@ -133,6 +139,32 @@ class LayoutOptimizerTest(test.TestCase):
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
+ def testGradient(self):
+ if not test.is_gpu_available(cuda_only=True):
+ self.skipTest('GPU required')
+
+ random_seed.set_random_seed(0)
+ x = random_ops.truncated_normal([1, 200, 200, 3], seed=0)
+ y = conv_layers.conv2d(x, 32, [3, 3])
+ z = conv_layers.conv2d(y, 32, [3, 3])
+ optimizer = gradient_descent.GradientDescentOptimizer(1e-4)
+ loss = math_ops.reduce_mean(z)
+ train_op = optimizer.minimize(loss)
+ graph = ops.get_default_graph()
+ graph.add_to_collection('train_op', train_op)
+ meta_graph = saver.export_meta_graph(graph_def=graph.as_graph_def())
+
+ rewrite_options = rewriter_config_pb2.RewriterConfig(
+ optimize_tensor_layout=True)
+ optimized_graph = tf_optimizer.OptimizeGraph(rewrite_options, meta_graph)
+
+ found = 0
+ for node in optimized_graph.node:
+ if node.op in ['Conv2D', 'Conv2DBackpropFilter', 'Conv2DBackpropInput']:
+ found += 1
+ self.assertEqual(node.attr['data_format'].s, 'NCHW')
+ self.assertEqual(found, 5)
+
if __name__ == '__main__':
test.main()
diff --git a/tensorflow/python/grappler/tf_optimizer.i b/tensorflow/python/grappler/tf_optimizer.i
index 09c19cb186..f3d8fe194b 100644
--- a/tensorflow/python/grappler/tf_optimizer.i
+++ b/tensorflow/python/grappler/tf_optimizer.i
@@ -104,8 +104,9 @@ PyObject* TF_OptimizeGraph(
tensorflow::DeviceBase* cpu_device = nullptr;
tensorflow::grappler::VirtualCluster cluster(device_map);
tensorflow::GraphDef out_graph;
- tensorflow::Status status = tensorflow::grappler::RunMetaOptimizer(
- *grappler_item, rewriter_config, cpu_device, &cluster, &out_graph);
+ tensorflow::grappler::MetaOptimizer optimizer(cpu_device, rewriter_config);
+ tensorflow::Status status = optimizer.Optimize(&cluster, *grappler_item, &out_graph);
+ optimizer.PrintResult();
tensorflow::Set_TF_Status_from_Status(out_status, status);
string out_graph_str = out_graph.SerializeAsString();
PyObject* ret = PyBytes_FromStringAndSize(out_graph_str.data(),
diff --git a/tensorflow/python/keras/BUILD b/tensorflow/python/keras/BUILD
index d61733dff6..4db48b45ed 100644
--- a/tensorflow/python/keras/BUILD
+++ b/tensorflow/python/keras/BUILD
@@ -15,6 +15,7 @@ py_library(
"_impl/keras/activations.py",
"_impl/keras/applications/__init__.py",
"_impl/keras/applications/imagenet_utils.py",
+ "_impl/keras/applications/inception_resnet_v2.py",
"_impl/keras/applications/inception_v3.py",
"_impl/keras/applications/mobilenet.py",
"_impl/keras/applications/resnet50.py",
@@ -73,6 +74,7 @@ py_library(
"_impl/keras/wrappers/scikit_learn.py",
"activations/__init__.py",
"applications/__init__.py",
+ "applications/inception_resnet_v2/__init__.py",
"applications/inception_v3/__init__.py",
"applications/mobilenet/__init__.py",
"applications/resnet50/__init__.py",
@@ -251,6 +253,18 @@ py_test(
)
py_test(
+ name = "inception_resnet_v2_test",
+ size = "medium",
+ srcs = ["_impl/keras/applications/inception_resnet_v2_test.py"],
+ srcs_version = "PY2AND3",
+ deps = [
+ ":keras",
+ "//tensorflow/python:client_testlib",
+ "//third_party/py/numpy",
+ ],
+)
+
+py_test(
name = "inception_v3_test",
size = "medium",
srcs = ["_impl/keras/applications/inception_v3_test.py"],
diff --git a/tensorflow/python/keras/__init__.py b/tensorflow/python/keras/__init__.py
index fa79889966..f56be967ff 100644
--- a/tensorflow/python/keras/__init__.py
+++ b/tensorflow/python/keras/__init__.py
@@ -42,6 +42,8 @@ from tensorflow.python.keras import utils
from tensorflow.python.keras import wrappers
from tensorflow.python.keras._impl.keras import __version__
from tensorflow.python.keras.layers import Input
+from tensorflow.python.keras.models import Model
+from tensorflow.python.keras.models import Sequential
del absolute_import
del division
diff --git a/tensorflow/python/keras/_impl/keras/__init__.py b/tensorflow/python/keras/_impl/keras/__init__.py
index a341065100..f0e8d91a92 100644
--- a/tensorflow/python/keras/_impl/keras/__init__.py
+++ b/tensorflow/python/keras/_impl/keras/__init__.py
@@ -37,5 +37,7 @@ from tensorflow.python.keras._impl.keras import regularizers
from tensorflow.python.keras._impl.keras import utils
from tensorflow.python.keras._impl.keras import wrappers
from tensorflow.python.keras._impl.keras.layers import Input
+from tensorflow.python.keras._impl.keras.models import Model
+from tensorflow.python.keras._impl.keras.models import Sequential
__version__ = '2.0.8-tf'
diff --git a/tensorflow/python/keras/_impl/keras/applications/__init__.py b/tensorflow/python/keras/_impl/keras/applications/__init__.py
index f78bbdc148..c11c52b71e 100644
--- a/tensorflow/python/keras/_impl/keras/applications/__init__.py
+++ b/tensorflow/python/keras/_impl/keras/applications/__init__.py
@@ -18,6 +18,7 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+from tensorflow.python.keras._impl.keras.applications.inception_resnet_v2 import InceptionResNetV2
from tensorflow.python.keras._impl.keras.applications.inception_v3 import InceptionV3
from tensorflow.python.keras._impl.keras.applications.mobilenet import MobileNet
from tensorflow.python.keras._impl.keras.applications.resnet50 import ResNet50
diff --git a/tensorflow/python/keras/_impl/keras/applications/imagenet_utils.py b/tensorflow/python/keras/_impl/keras/applications/imagenet_utils.py
index 43628341cb..58841e5db0 100644
--- a/tensorflow/python/keras/_impl/keras/applications/imagenet_utils.py
+++ b/tensorflow/python/keras/_impl/keras/applications/imagenet_utils.py
@@ -29,12 +29,19 @@ CLASS_INDEX = None
CLASS_INDEX_PATH = 'https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json'
-def preprocess_input(x, data_format=None):
+def preprocess_input(x, data_format=None, mode='caffe'):
"""Preprocesses a tensor encoding a batch of images.
Arguments:
x: input Numpy tensor, 4D.
data_format: data format of the image tensor.
+ mode: One of "caffe", "tf".
+ - caffe: will convert the images from RGB to BGR,
+ then will zero-center each color channel with
+ respect to the ImageNet dataset,
+ without scaling.
+ - tf: will scale pixels between -1 and 1,
+ sample-wise.
Returns:
Preprocessed tensor.
@@ -43,6 +50,12 @@ def preprocess_input(x, data_format=None):
data_format = K.image_data_format()
assert data_format in {'channels_last', 'channels_first'}
+ if mode == 'tf':
+ x /= 255.
+ x -= 0.5
+ x *= 2.
+ return x
+
if data_format == 'channels_first':
if x.ndim == 3:
# 'RGB'->'BGR'
@@ -89,8 +102,10 @@ def decode_predictions(preds, top=5):
'(i.e. a 2D array of shape (samples, 1000)). '
'Found array with shape: ' + str(preds.shape))
if CLASS_INDEX is None:
- fpath = get_file(
- 'imagenet_class_index.json', CLASS_INDEX_PATH, cache_subdir='models')
+ fpath = get_file('imagenet_class_index.json',
+ CLASS_INDEX_PATH,
+ cache_subdir='models',
+ file_hash='c2c37ea517e94d9795004a39431a14cb')
CLASS_INDEX = json.load(open(fpath))
results = []
for pred in preds:
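The new 'tf' preprocessing mode maps pixel values from [0, 255] to [-1, 1]; a quick numeric check of the three steps:

    import numpy as np

    x = np.array([0., 127.5, 255.])
    x = x / 255.  # -> [0.0, 0.5, 1.0]
    x = x - 0.5   # -> [-0.5, 0.0, 0.5]
    x = x * 2.    # -> [-1.0, 0.0, 1.0]
    print(x)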
diff --git a/tensorflow/python/keras/_impl/keras/applications/inception_resnet_v2.py b/tensorflow/python/keras/_impl/keras/applications/inception_resnet_v2.py
new file mode 100644
index 0000000000..de29b92575
--- /dev/null
+++ b/tensorflow/python/keras/_impl/keras/applications/inception_resnet_v2.py
@@ -0,0 +1,369 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Inception-ResNet V2 model for Keras.
+
+# Reference
+- [Inception-v4, Inception-ResNet and the Impact of
+ Residual Connections on Learning](https://arxiv.org/abs/1602.07261)
+
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.python.keras._impl.keras import backend as K
+from tensorflow.python.keras._impl.keras.applications import imagenet_utils
+from tensorflow.python.keras._impl.keras.applications.imagenet_utils import _obtain_input_shape
+from tensorflow.python.keras._impl.keras.applications.imagenet_utils import decode_predictions # pylint: disable=unused-import
+from tensorflow.python.keras._impl.keras.engine.topology import get_source_inputs
+from tensorflow.python.keras._impl.keras.layers import Activation
+from tensorflow.python.keras._impl.keras.layers import AveragePooling2D
+from tensorflow.python.keras._impl.keras.layers import BatchNormalization
+from tensorflow.python.keras._impl.keras.layers import Concatenate
+from tensorflow.python.keras._impl.keras.layers import Conv2D
+from tensorflow.python.keras._impl.keras.layers import Dense
+from tensorflow.python.keras._impl.keras.layers import GlobalAveragePooling2D
+from tensorflow.python.keras._impl.keras.layers import GlobalMaxPooling2D
+from tensorflow.python.keras._impl.keras.layers import Input
+from tensorflow.python.keras._impl.keras.layers import Lambda
+from tensorflow.python.keras._impl.keras.layers import MaxPooling2D
+from tensorflow.python.keras._impl.keras.models import Model
+from tensorflow.python.keras._impl.keras.utils.data_utils import get_file
+
+BASE_WEIGHT_URL = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.7/'
+
+
+def preprocess_input(x):
+ """Preprocesses a numpy array encoding a batch of images.
+
+ Arguments:
+    x: a 4D numpy array consisting of RGB values within [0, 255].
+
+ Returns:
+ Preprocessed array.
+ """
+ return imagenet_utils.preprocess_input(x, mode='tf')
+
+
+def conv2d_bn(x,
+ filters,
+ kernel_size,
+ strides=1,
+ padding='same',
+ activation='relu',
+ use_bias=False,
+ name=None):
+ """Utility function to apply conv + BN.
+
+ Arguments:
+ x: input tensor.
+ filters: filters in `Conv2D`.
+ kernel_size: kernel size as in `Conv2D`.
+ strides: strides in `Conv2D`.
+ padding: padding mode in `Conv2D`.
+ activation: activation in `Conv2D`.
+ use_bias: whether to use a bias in `Conv2D`.
+ name: name of the ops; will become `name + '_ac'` for the activation
+ and `name + '_bn'` for the batch norm layer.
+
+ Returns:
+ Output tensor after applying `Conv2D` and `BatchNormalization`.
+ """
+ x = Conv2D(
+ filters,
+ kernel_size,
+ strides=strides,
+ padding=padding,
+ use_bias=use_bias,
+ name=name)(
+ x)
+ if not use_bias:
+ bn_axis = 1 if K.image_data_format() == 'channels_first' else 3
+ bn_name = None if name is None else name + '_bn'
+ x = BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x)
+ if activation is not None:
+ ac_name = None if name is None else name + '_ac'
+ x = Activation(activation, name=ac_name)(x)
+ return x
+
+
+def inception_resnet_block(x, scale, block_type, block_idx, activation='relu'):
+ """Adds a Inception-ResNet block.
+
+ This function builds 3 types of Inception-ResNet blocks mentioned
+ in the paper, controlled by the `block_type` argument (which is the
+ block name used in the official TF-slim implementation):
+ - Inception-ResNet-A: `block_type='block35'`
+ - Inception-ResNet-B: `block_type='block17'`
+ - Inception-ResNet-C: `block_type='block8'`
+
+ Arguments:
+ x: input tensor.
+ scale: scaling factor to scale the residuals (i.e., the output of
+ passing `x` through an inception module) before adding them
+      to the shortcut branch. Let `r` be the output from the residual
+      branch; then the output of this block will be `x + scale * r`.
+ block_type: `'block35'`, `'block17'` or `'block8'`, determines
+ the network structure in the residual branch.
+    block_idx: an `int` used for generating layer names. The Inception-ResNet
+      blocks are repeated many times in this network. We use `block_idx` to
+      identify each of the repetitions. For example, the first
+      Inception-ResNet-A block will have `block_type='block35', block_idx=0`,
+      and the layer names will have a common prefix `'block35_0'`.
+ activation: activation function to use at the end of the block
+ (see [activations](../activations.md)).
+ When `activation=None`, no activation is applied
+ (i.e., "linear" activation: `a(x) = x`).
+
+ Returns:
+ Output tensor for the block.
+
+ Raises:
+ ValueError: if `block_type` is not one of `'block35'`,
+ `'block17'` or `'block8'`.
+ """
+ if block_type == 'block35':
+ branch_0 = conv2d_bn(x, 32, 1)
+ branch_1 = conv2d_bn(x, 32, 1)
+ branch_1 = conv2d_bn(branch_1, 32, 3)
+ branch_2 = conv2d_bn(x, 32, 1)
+ branch_2 = conv2d_bn(branch_2, 48, 3)
+ branch_2 = conv2d_bn(branch_2, 64, 3)
+ branches = [branch_0, branch_1, branch_2]
+ elif block_type == 'block17':
+ branch_0 = conv2d_bn(x, 192, 1)
+ branch_1 = conv2d_bn(x, 128, 1)
+ branch_1 = conv2d_bn(branch_1, 160, [1, 7])
+ branch_1 = conv2d_bn(branch_1, 192, [7, 1])
+ branches = [branch_0, branch_1]
+ elif block_type == 'block8':
+ branch_0 = conv2d_bn(x, 192, 1)
+ branch_1 = conv2d_bn(x, 192, 1)
+ branch_1 = conv2d_bn(branch_1, 224, [1, 3])
+ branch_1 = conv2d_bn(branch_1, 256, [3, 1])
+ branches = [branch_0, branch_1]
+ else:
+ raise ValueError('Unknown Inception-ResNet block type. '
+ 'Expects "block35", "block17" or "block8", '
+ 'but got: ' + str(block_type))
+
+ block_name = block_type + '_' + str(block_idx)
+ channel_axis = 1 if K.image_data_format() == 'channels_first' else 3
+ mixed = Concatenate(axis=channel_axis, name=block_name + '_mixed')(branches)
+ up = conv2d_bn(
+ mixed,
+ K.int_shape(x)[channel_axis],
+ 1,
+ activation=None,
+ use_bias=True,
+ name=block_name + '_conv')
+
+ x = Lambda(
+ lambda inputs, scale: inputs[0] + inputs[1] * scale,
+ arguments={'scale': scale},
+ name=block_name)([x, up])
+ if activation is not None:
+ x = Activation(activation, name=block_name + '_ac')(x)
+ return x
+
+
+def InceptionResNetV2(include_top=True, # pylint: disable=invalid-name
+ weights='imagenet',
+ input_tensor=None,
+ input_shape=None,
+ pooling=None,
+ classes=1000):
+ """Instantiates the Inception-ResNet v2 architecture.
+
+ Optionally loads weights pre-trained on ImageNet.
+ Note that when using TensorFlow, for best performance you should
+ set `"image_data_format": "channels_last"` in your Keras config
+ at `~/.keras/keras.json`.
+
+ The model and the weights are compatible with TensorFlow, Theano and
+ CNTK backends. The data format convention used by the model is
+ the one specified in your Keras config file.
+
+ Note that the default input image size for this model is 299x299, instead
+ of 224x224 as in the VGG16 and ResNet models. Also, the input preprocessing
+ function is different (i.e., do not use `imagenet_utils.preprocess_input()`
+ with this model. Use `preprocess_input()` defined in this module instead).
+
+ Arguments:
+ include_top: whether to include the fully-connected
+ layer at the top of the network.
+ weights: one of `None` (random initialization)
+ or `'imagenet'` (pre-training on ImageNet).
+ input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
+ to use as image input for the model.
+    input_shape: optional shape tuple, only to be specified
+      if `include_top` is `False` (otherwise the input shape
+      has to be `(299, 299, 3)` (with `'channels_last'` data format)
+      or `(3, 299, 299)` (with `'channels_first'` data format)).
+      It should have exactly 3 input channels,
+      and width and height should be no smaller than 139.
+      E.g. `(150, 150, 3)` would be one valid value.
+ pooling: Optional pooling mode for feature extraction
+ when `include_top` is `False`.
+ - `None` means that the output of the model will be
+ the 4D tensor output of the last convolutional layer.
+ - `'avg'` means that global average pooling
+ will be applied to the output of the
+ last convolutional layer, and thus
+ the output of the model will be a 2D tensor.
+ - `'max'` means that global max pooling will be applied.
+ classes: optional number of classes to classify images
+ into, only to be specified if `include_top` is `True`, and
+ if no `weights` argument is specified.
+
+ Returns:
+ A Keras `Model` instance.
+
+ Raises:
+ ValueError: in case of invalid argument for `weights`,
+ or invalid input shape.
+ """
+ if weights not in {'imagenet', None}:
+ raise ValueError('The `weights` argument should be either '
+ '`None` (random initialization) or `imagenet` '
+ '(pre-training on ImageNet).')
+
+ if weights == 'imagenet' and include_top and classes != 1000:
+ raise ValueError('If using `weights` as imagenet with `include_top`'
+ ' as true, `classes` should be 1000')
+
+ # Determine proper input shape
+ input_shape = _obtain_input_shape(
+ input_shape,
+ default_size=299,
+ min_size=139,
+ data_format=K.image_data_format(),
+ require_flatten=False,
+ weights=weights)
+
+ if input_tensor is None:
+ img_input = Input(shape=input_shape)
+ else:
+ if not K.is_keras_tensor(input_tensor):
+ img_input = Input(tensor=input_tensor, shape=input_shape)
+ else:
+ img_input = input_tensor
+
+ # Stem block: 35 x 35 x 192
+ x = conv2d_bn(img_input, 32, 3, strides=2, padding='valid')
+ x = conv2d_bn(x, 32, 3, padding='valid')
+ x = conv2d_bn(x, 64, 3)
+ x = MaxPooling2D(3, strides=2)(x)
+ x = conv2d_bn(x, 80, 1, padding='valid')
+ x = conv2d_bn(x, 192, 3, padding='valid')
+ x = MaxPooling2D(3, strides=2)(x)
+
+ # Mixed 5b (Inception-A block): 35 x 35 x 320
+ branch_0 = conv2d_bn(x, 96, 1)
+ branch_1 = conv2d_bn(x, 48, 1)
+ branch_1 = conv2d_bn(branch_1, 64, 5)
+ branch_2 = conv2d_bn(x, 64, 1)
+ branch_2 = conv2d_bn(branch_2, 96, 3)
+ branch_2 = conv2d_bn(branch_2, 96, 3)
+ branch_pool = AveragePooling2D(3, strides=1, padding='same')(x)
+ branch_pool = conv2d_bn(branch_pool, 64, 1)
+ branches = [branch_0, branch_1, branch_2, branch_pool]
+ channel_axis = 1 if K.image_data_format() == 'channels_first' else 3
+ x = Concatenate(axis=channel_axis, name='mixed_5b')(branches)
+
+ # 10x block35 (Inception-ResNet-A block): 35 x 35 x 320
+ for block_idx in range(1, 11):
+ x = inception_resnet_block(
+ x, scale=0.17, block_type='block35', block_idx=block_idx)
+
+ # Mixed 6a (Reduction-A block): 17 x 17 x 1088
+ branch_0 = conv2d_bn(x, 384, 3, strides=2, padding='valid')
+ branch_1 = conv2d_bn(x, 256, 1)
+ branch_1 = conv2d_bn(branch_1, 256, 3)
+ branch_1 = conv2d_bn(branch_1, 384, 3, strides=2, padding='valid')
+ branch_pool = MaxPooling2D(3, strides=2, padding='valid')(x)
+ branches = [branch_0, branch_1, branch_pool]
+ x = Concatenate(axis=channel_axis, name='mixed_6a')(branches)
+
+ # 20x block17 (Inception-ResNet-B block): 17 x 17 x 1088
+ for block_idx in range(1, 21):
+ x = inception_resnet_block(
+ x, scale=0.1, block_type='block17', block_idx=block_idx)
+
+ # Mixed 7a (Reduction-B block): 8 x 8 x 2080
+ branch_0 = conv2d_bn(x, 256, 1)
+ branch_0 = conv2d_bn(branch_0, 384, 3, strides=2, padding='valid')
+ branch_1 = conv2d_bn(x, 256, 1)
+ branch_1 = conv2d_bn(branch_1, 288, 3, strides=2, padding='valid')
+ branch_2 = conv2d_bn(x, 256, 1)
+ branch_2 = conv2d_bn(branch_2, 288, 3)
+ branch_2 = conv2d_bn(branch_2, 320, 3, strides=2, padding='valid')
+ branch_pool = MaxPooling2D(3, strides=2, padding='valid')(x)
+ branches = [branch_0, branch_1, branch_2, branch_pool]
+ x = Concatenate(axis=channel_axis, name='mixed_7a')(branches)
+
+ # 10x block8 (Inception-ResNet-C block): 8 x 8 x 2080
+ for block_idx in range(1, 10):
+ x = inception_resnet_block(
+ x, scale=0.2, block_type='block8', block_idx=block_idx)
+ x = inception_resnet_block(
+ x, scale=1., activation=None, block_type='block8', block_idx=10)
+
+ # Final convolution block: 8 x 8 x 1536
+ x = conv2d_bn(x, 1536, 1, name='conv_7b')
+
+ if include_top:
+ # Classification block
+ x = GlobalAveragePooling2D(name='avg_pool')(x)
+ x = Dense(classes, activation='softmax', name='predictions')(x)
+ else:
+ if pooling == 'avg':
+ x = GlobalAveragePooling2D()(x)
+ elif pooling == 'max':
+ x = GlobalMaxPooling2D()(x)
+
+ # Ensure that the model takes into account
+ # any potential predecessors of `input_tensor`
+ if input_tensor is not None:
+ inputs = get_source_inputs(input_tensor)
+ else:
+ inputs = img_input
+
+ # Create model
+ model = Model(inputs, x, name='inception_resnet_v2')
+
+ # Load weights
+ if weights == 'imagenet':
+ if include_top:
+ fname = 'inception_resnet_v2_weights_tf_dim_ordering_tf_kernels.h5'
+ weights_path = get_file(
+ fname,
+ BASE_WEIGHT_URL + fname,
+ cache_subdir='models',
+ file_hash='e693bd0210a403b3192acc6073ad2e96')
+ else:
+ fname = 'inception_resnet_v2_weights_tf_dim_ordering_tf_kernels_notop.h5'
+ weights_path = get_file(
+ fname,
+ BASE_WEIGHT_URL + fname,
+ cache_subdir='models',
+ file_hash='d19885ff4a710c122648d3b5c3b684e4')
+ model.load_weights(weights_path)
+
+ return model
diff --git a/tensorflow/python/keras/_impl/keras/applications/inception_resnet_v2_test.py b/tensorflow/python/keras/_impl/keras/applications/inception_resnet_v2_test.py
new file mode 100644
index 0000000000..de71e9615a
--- /dev/null
+++ b/tensorflow/python/keras/_impl/keras/applications/inception_resnet_v2_test.py
@@ -0,0 +1,59 @@
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for Inception V3 application."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+
+from tensorflow.python.keras._impl import keras
+from tensorflow.python.platform import test
+
+
+class InceptionResNetV2Test(test.TestCase):
+
+ def test_with_top(self):
+ model = keras.applications.InceptionResNetV2(weights=None)
+ self.assertEqual(model.output_shape, (None, 1000))
+
+ def test_no_top(self):
+ model = keras.applications.InceptionResNetV2(weights=None,
+ include_top=False)
+ self.assertEqual(model.output_shape, (None, None, None, 1536))
+
+ def test_with_pooling(self):
+ model = keras.applications.InceptionResNetV2(weights=None,
+ include_top=False,
+ pooling='avg')
+ self.assertEqual(model.output_shape, (None, 1536))
+
+ def test_weight_loading(self):
+ with self.assertRaises(ValueError):
+ keras.applications.InceptionResNetV2(weights='unknown',
+ include_top=False)
+ with self.assertRaises(ValueError):
+ keras.applications.InceptionResNetV2(weights='imagenet',
+ classes=2000)
+
+ def test_preprocess_input(self):
+ x = np.random.uniform(0, 255, (2, 300, 200, 3))
+ out1 = keras.applications.inception_resnet_v2.preprocess_input(x)
+ self.assertAllClose(np.mean(out1), 0., atol=0.1)
+
+
+if __name__ == '__main__':
+ test.main()
diff --git a/tensorflow/python/keras/_impl/keras/applications/inception_v3.py b/tensorflow/python/keras/_impl/keras/applications/inception_v3.py
index edb4c60f8a..d4fea4fbb0 100644
--- a/tensorflow/python/keras/_impl/keras/applications/inception_v3.py
+++ b/tensorflow/python/keras/_impl/keras/applications/inception_v3.py
@@ -31,6 +31,7 @@ from __future__ import print_function
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras import layers
+from tensorflow.python.keras._impl.keras.applications import imagenet_utils
from tensorflow.python.keras._impl.keras.applications.imagenet_utils import _obtain_input_shape
from tensorflow.python.keras._impl.keras.applications.imagenet_utils import decode_predictions # pylint: disable=unused-import
from tensorflow.python.keras._impl.keras.engine.topology import get_source_inputs
@@ -374,19 +375,24 @@ def InceptionV3(include_top=True,
'inception_v3_weights_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH,
cache_subdir='models',
- md5_hash='9a0d58056eeedaa3f26cb7ebd46da564')
+ file_hash='9a0d58056eeedaa3f26cb7ebd46da564')
else:
weights_path = get_file(
'inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5',
WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
- md5_hash='bcbd6486424b2319ff4ef7d526e38f63')
+ file_hash='bcbd6486424b2319ff4ef7d526e38f63')
model.load_weights(weights_path)
return model
def preprocess_input(x):
- x /= 255.
- x -= 0.5
- x *= 2.
- return x
+ """Preprocesses a numpy array encoding a batch of images.
+
+ Arguments:
+    x: a 4D numpy array consisting of RGB values within [0, 255].
+
+ Returns:
+ Preprocessed array.
+ """
+ return imagenet_utils.preprocess_input(x, mode='tf')
diff --git a/tensorflow/python/keras/_impl/keras/applications/mobilenet.py b/tensorflow/python/keras/_impl/keras/applications/mobilenet.py
index f6482d2549..653bd8c09f 100644
--- a/tensorflow/python/keras/_impl/keras/applications/mobilenet.py
+++ b/tensorflow/python/keras/_impl/keras/applications/mobilenet.py
@@ -73,6 +73,7 @@ from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras import constraints
from tensorflow.python.keras._impl.keras import initializers
from tensorflow.python.keras._impl.keras import regularizers
+from tensorflow.python.keras._impl.keras.applications import imagenet_utils
from tensorflow.python.keras._impl.keras.applications.imagenet_utils import _obtain_input_shape
from tensorflow.python.keras._impl.keras.applications.imagenet_utils import decode_predictions # pylint: disable=unused-import
from tensorflow.python.keras._impl.keras.engine import InputSpec
@@ -97,10 +98,15 @@ def relu6(x):
def preprocess_input(x):
- x /= 255.
- x -= 0.5
- x *= 2.
- return x
+ """Preprocesses a numpy array encoding a batch of images.
+
+ Arguments:
+    x: a 4D numpy array consisting of RGB values within [0, 255].
+
+ Returns:
+ Preprocessed array.
+ """
+ return imagenet_utils.preprocess_input(x, mode='tf')
class DepthwiseConv2D(Conv2D):
diff --git a/tensorflow/python/keras/_impl/keras/applications/resnet50.py b/tensorflow/python/keras/_impl/keras/applications/resnet50.py
index f0cff2d686..717b626fdc 100644
--- a/tensorflow/python/keras/_impl/keras/applications/resnet50.py
+++ b/tensorflow/python/keras/_impl/keras/applications/resnet50.py
@@ -56,7 +56,7 @@ def identity_block(input_tensor, kernel_size, filters, stage, block):
Arguments:
input_tensor: input tensor
kernel_size: default 3, the kernel size of middle conv layer at main path
- filters: list of integers, the filterss of 3 conv layer at main path
+ filters: list of integers, the filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
@@ -95,7 +95,7 @@ def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2,
Arguments:
input_tensor: input tensor
kernel_size: default 3, the kernel size of middle conv layer at main path
- filters: list of integers, the filterss of 3 conv layer at main path
+ filters: list of integers, the filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
strides: Tuple of integers.
diff --git a/tensorflow/python/keras/_impl/keras/applications/vgg16.py b/tensorflow/python/keras/_impl/keras/applications/vgg16.py
index 485b486e9d..a0862e6407 100644
--- a/tensorflow/python/keras/_impl/keras/applications/vgg16.py
+++ b/tensorflow/python/keras/_impl/keras/applications/vgg16.py
@@ -192,12 +192,14 @@ def VGG16(include_top=True,
weights_path = get_file(
'vgg16_weights_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH,
- cache_subdir='models')
+ cache_subdir='models',
+ file_hash='64373286793e3c8b2b4e3219cbf3544b')
else:
weights_path = get_file(
'vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
WEIGHTS_PATH_NO_TOP,
- cache_subdir='models')
+ cache_subdir='models',
+ file_hash='6d6bbae143d832006294945121d1f1fc')
model.load_weights(weights_path)
if K.backend() == 'theano':
layer_utils.convert_all_kernels_in_model(model)
diff --git a/tensorflow/python/keras/_impl/keras/applications/vgg19.py b/tensorflow/python/keras/_impl/keras/applications/vgg19.py
index 3af6417c84..cfa1c95336 100644
--- a/tensorflow/python/keras/_impl/keras/applications/vgg19.py
+++ b/tensorflow/python/keras/_impl/keras/applications/vgg19.py
@@ -198,12 +198,14 @@ def VGG19(include_top=True,
weights_path = get_file(
'vgg19_weights_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH,
- cache_subdir='models')
+ cache_subdir='models',
+ file_hash='cbe5617147190e668d6c5d5026f83318')
else:
weights_path = get_file(
'vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5',
WEIGHTS_PATH_NO_TOP,
- cache_subdir='models')
+ cache_subdir='models',
+ file_hash='253f8cb515780f3b799900260a226db6')
model.load_weights(weights_path)
if K.backend() == 'theano':
layer_utils.convert_all_kernels_in_model(model)
diff --git a/tensorflow/python/keras/_impl/keras/applications/xception.py b/tensorflow/python/keras/_impl/keras/applications/xception.py
index 6e521daa2d..14f6ad8090 100644
--- a/tensorflow/python/keras/_impl/keras/applications/xception.py
+++ b/tensorflow/python/keras/_impl/keras/applications/xception.py
@@ -38,6 +38,7 @@ from __future__ import print_function
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras import layers
+from tensorflow.python.keras._impl.keras.applications import imagenet_utils
from tensorflow.python.keras._impl.keras.applications.imagenet_utils import _obtain_input_shape
from tensorflow.python.keras._impl.keras.applications.imagenet_utils import decode_predictions # pylint: disable=unused-import
from tensorflow.python.keras._impl.keras.engine.topology import get_source_inputs
@@ -287,12 +288,14 @@ def Xception(include_top=True,
weights_path = get_file(
'xception_weights_tf_dim_ordering_tf_kernels.h5',
TF_WEIGHTS_PATH,
- cache_subdir='models')
+ cache_subdir='models',
+ file_hash='0a58e3b7378bc2990ea3b43d5981f1f6')
else:
weights_path = get_file(
'xception_weights_tf_dim_ordering_tf_kernels_notop.h5',
TF_WEIGHTS_PATH_NO_TOP,
- cache_subdir='models')
+ cache_subdir='models',
+ file_hash='b0042744bf5b25fce3cb969f33bebb97')
model.load_weights(weights_path)
if old_data_format:
@@ -301,7 +304,12 @@ def Xception(include_top=True,
def preprocess_input(x):
- x /= 255.
- x -= 0.5
- x *= 2.
- return x
+ """Preprocesses a numpy array encoding a batch of images.
+
+ Arguments:
+ x: a 4D numpy array consisting of RGB values within [0, 255].
+
+ Returns:
+ Preprocessed array.
+ """
+ return imagenet_utils.preprocess_input(x, mode='tf')
diff --git a/tensorflow/python/keras/_impl/keras/backend.py b/tensorflow/python/keras/_impl/keras/backend.py
index f7f582bfe7..f9a53c4eb4 100644
--- a/tensorflow/python/keras/_impl/keras/backend.py
+++ b/tensorflow/python/keras/_impl/keras/backend.py
@@ -90,6 +90,11 @@ _EPSILON = 10e-8
# Default image data format, one of "channels_last", "channels_first".
_IMAGE_DATA_FORMAT = 'channels_last'
+# This list holds the available devices.
+# It is populated when `_get_available_gpus()` is called for the first time.
+# We assume our devices don't change henceforth.
+_LOCAL_DEVICES = None
+
def backend():
"""Publicly accessible method for determining the current backend.
@@ -442,8 +447,10 @@ def _get_available_gpus():
Returns:
A list of available GPU devices.
"""
- devices = get_session().list_devices()
- return [x.name for x in devices if x.device_type == 'GPU']
+ global _LOCAL_DEVICES
+ if _LOCAL_DEVICES is None:
+ _LOCAL_DEVICES = get_session().list_devices()
+ return [x.name for x in _LOCAL_DEVICES if x.device_type == 'GPU']
def _has_nchw_support():
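The backend change above is plain memoization: get_session().list_devices() is expensive, so the result is cached in a module-level variable on first use and reused afterwards (devices are assumed not to change for the life of the process). A stripped-down sketch of the same pattern, with illustrative names:

```python
_LOCAL_DEVICES = None  # populated on first call; assumed static afterwards

def available_gpus(session):
    """Returns GPU device names, querying the session only once."""
    global _LOCAL_DEVICES
    if _LOCAL_DEVICES is None:
        _LOCAL_DEVICES = session.list_devices()
    return [d.name for d in _LOCAL_DEVICES if d.device_type == 'GPU']
```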
diff --git a/tensorflow/python/keras/applications/__init__.py b/tensorflow/python/keras/applications/__init__.py
index e34d9a8e0b..34f1435ffb 100644
--- a/tensorflow/python/keras/applications/__init__.py
+++ b/tensorflow/python/keras/applications/__init__.py
@@ -18,12 +18,14 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+from tensorflow.python.keras.applications import inception_resnet_v2
from tensorflow.python.keras.applications import inception_v3
from tensorflow.python.keras.applications import mobilenet
from tensorflow.python.keras.applications import resnet50
from tensorflow.python.keras.applications import vgg16
from tensorflow.python.keras.applications import vgg19
from tensorflow.python.keras.applications import xception
+from tensorflow.python.keras.applications.inception_resnet_v2 import InceptionResNetV2
from tensorflow.python.keras.applications.inception_v3 import InceptionV3
from tensorflow.python.keras.applications.mobilenet import MobileNet
from tensorflow.python.keras.applications.resnet50 import ResNet50
diff --git a/tensorflow/python/keras/applications/inception_resnet_v2/__init__.py b/tensorflow/python/keras/applications/inception_resnet_v2/__init__.py
new file mode 100644
index 0000000000..223660e9be
--- /dev/null
+++ b/tensorflow/python/keras/applications/inception_resnet_v2/__init__.py
@@ -0,0 +1,27 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""InceptionResNetV2 Keras application."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.python.keras._impl.keras.applications.inception_resnet_v2 import decode_predictions
+from tensorflow.python.keras._impl.keras.applications.inception_resnet_v2 import InceptionResNetV2
+from tensorflow.python.keras._impl.keras.applications.inception_resnet_v2 import preprocess_input
+
+del absolute_import
+del division
+del print_function
diff --git a/tensorflow/python/kernel_tests/BUILD b/tensorflow/python/kernel_tests/BUILD
index cbb9ac2a74..7fa504e85e 100644
--- a/tensorflow/python/kernel_tests/BUILD
+++ b/tensorflow/python/kernel_tests/BUILD
@@ -505,6 +505,18 @@ tf_py_test(
],
)
+tf_py_test(
+ name = "matrix_exponential_op_test",
+ size = "small",
+ srcs = ["matrix_exponential_op_test.py"],
+ additional_deps = [
+ "//third_party/py/numpy",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_for_generated_wrappers",
+ "//tensorflow/python:linalg_ops",
+ ],
+)
+
cuda_py_test(
name = "matrix_inverse_op_test",
size = "small",
@@ -2860,6 +2872,20 @@ tf_py_test(
)
tf_py_test(
+ name = "garbage_collection_test",
+ size = "small",
+ srcs = ["garbage_collection_test.py"],
+ additional_deps = [
+ "//tensorflow/python/eager:context",
+ "//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:resource_variable_ops",
+ "//tensorflow/python:dtypes",
+ "//tensorflow/python:tensor_array_ops",
+ "//tensorflow/python:client_testlib",
+ ],
+)
+
+tf_py_test(
name = "list_files_dataset_op_test",
size = "small",
srcs = ["list_files_dataset_op_test.py"],
diff --git a/tensorflow/python/kernel_tests/checkpoint_ops_test.py b/tensorflow/python/kernel_tests/checkpoint_ops_test.py
index d2eb3eb801..a786d0a47e 100644
--- a/tensorflow/python/kernel_tests/checkpoint_ops_test.py
+++ b/tensorflow/python/kernel_tests/checkpoint_ops_test.py
@@ -87,6 +87,21 @@ class GenerateVocabRemappingTest(test.TestCase):
self.assertAllEqual(expected_remapping, remapping.eval())
self.assertAllEqual(expected_num_present, num_present.eval())
+ def test_generate_remapping_with_old_vocab_size(self):
+ """Tests where old_vocab_size is specified."""
+ remapping, num_present = gen_checkpoint_ops._generate_vocab_remapping(
+ new_vocab_file=self.new_vocab_file,
+ old_vocab_file=self.old_vocab_file,
+ num_new_vocab=3,
+ new_vocab_offset=0,
+ # Old vocabulary becomes ['knitting', 'eminem'].
+ old_vocab_size=2)
+ expected_remapping = [-1, 0, 1]
+ expected_num_present = 2
+ with self.test_session():
+ self.assertAllEqual(expected_remapping, remapping.eval())
+ self.assertAllEqual(expected_num_present, num_present.eval())
+
class LoadAndRemapMatrixTest(test.TestCase):
"""Tests for the load_and_remap_matrix() op."""
diff --git a/tensorflow/python/kernel_tests/depthtospace_op_test.py b/tensorflow/python/kernel_tests/depthtospace_op_test.py
index 792806642a..7df2366954 100644
--- a/tensorflow/python/kernel_tests/depthtospace_op_test.py
+++ b/tensorflow/python/kernel_tests/depthtospace_op_test.py
@@ -284,11 +284,16 @@ class DepthToSpaceTest(test.TestCase):
class DepthToSpaceGradientTest(test.TestCase):
# Check the gradients.
- def _checkGrad(self, x, block_size):
+ def _checkGrad(self, x, block_size, data_format):
+ # NCHW is implemented only on GPU.
+ if data_format == "NCHW" and not test.is_gpu_available():
+ return
+
assert 4 == x.ndim
with self.test_session(use_gpu=True):
tf_x = ops.convert_to_tensor(x)
- tf_y = array_ops.depth_to_space(tf_x, block_size)
+ tf_y = array_ops.depth_to_space(tf_x, block_size, data_format=data_format)
+
epsilon = 1e-2
((x_jacob_t, x_jacob_n)) = gradient_checker.compute_gradient(
tf_x,
@@ -297,28 +302,32 @@ class DepthToSpaceGradientTest(test.TestCase):
tf_y.get_shape().as_list(),
x_init_value=x,
delta=epsilon)
-
- self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)
+ self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)
# Tests a gradient for depth_to_space of x which is a four dimensional
# tensor of shape [b, h, w, d * block_size * block_size].
- def _compare(self, b, h, w, d, block_size):
+ def _compare(self, b, h, w, d, block_size, data_format):
block_size_sq = block_size * block_size
- x = np.random.normal(
- 0, 1, b * h * w * d * block_size_sq).astype(np.float32).reshape(
- [b, h, w, d * block_size_sq])
+ data = np.random.normal(0, 1, b * h * w * d * block_size_sq).astype(
+ np.float32)
+ if data_format == "NHWC":
+ x = data.reshape([b, h, w, d * block_size_sq])
+ else:
+ x = data.reshape([b, d * block_size_sq, h, w])
- self._checkGrad(x, block_size)
+ self._checkGrad(x, block_size, data_format)
# Don't use very large numbers as dimensions here, as the result is tensor
# with cartesian product of the dimensions.
def testSmall(self):
block_size = 2
- self._compare(3, 2, 5, 3, block_size)
+ self._compare(3, 2, 5, 3, block_size, "NHWC")
+ self._compare(3, 2, 5, 3, block_size, "NCHW")
def testSmall2(self):
block_size = 3
- self._compare(1, 2, 3, 2, block_size)
+ self._compare(1, 2, 3, 2, block_size, "NHWC")
+ self._compare(1, 2, 3, 2, block_size, "NCHW")
if __name__ == "__main__":
diff --git a/tensorflow/python/kernel_tests/distributions/special_math_test.py b/tensorflow/python/kernel_tests/distributions/special_math_test.py
index dc462bae56..9441cdbe39 100644
--- a/tensorflow/python/kernel_tests/distributions/special_math_test.py
+++ b/tensorflow/python/kernel_tests/distributions/special_math_test.py
@@ -24,6 +24,7 @@ import importlib
import numpy as np
from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import variables
@@ -90,6 +91,21 @@ class NdtriTest(test.TestCase):
x = special_math.ndtri(p)
self.assertAllClose(expected_x, x.eval(), atol=0.)
+ def testNdtriDynamicShape(self):
+ """Verifies that ndtri computation is correct."""
+ with self.test_session() as sess:
+ if not special:
+ return
+
+ p = array_ops.placeholder(np.float32)
+ p_ = np.linspace(0., 1.0, 50).astype(np.float32)
+
+ x = special_math.ndtri(p)
+ x_ = sess.run(x, feed_dict={p: p_})
+
+ expected_x_ = special.ndtri(p_)
+ self.assertAllClose(expected_x_, x_, atol=0.)
+
def _baseNdtriFiniteGradientTest(self, dtype):
"""Verifies that ndtri has finite gradients at interesting points."""
g = ops.Graph()
diff --git a/tensorflow/python/kernel_tests/garbage_collection_test.py b/tensorflow/python/kernel_tests/garbage_collection_test.py
new file mode 100644
index 0000000000..24a6ee74c5
--- /dev/null
+++ b/tensorflow/python/kernel_tests/garbage_collection_test.py
@@ -0,0 +1,88 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests which set DEBUG_SAVEALL and assert no garbage was created.
+
+This flag seems to be sticky, so these tests have been isolated for now.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import gc
+
+from tensorflow.python.eager import context
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import test_util
+from tensorflow.python.ops import resource_variable_ops
+from tensorflow.python.ops import tensor_array_ops
+from tensorflow.python.platform import test
+
+
+def assert_no_garbage_created(f):
+ """Test decorator to assert that no garbage has been created."""
+
+ def decorator(self):
+ """Sets DEBUG_SAVEALL, runs the test, and checks for new garbage."""
+ gc.disable()
+ previous_debug_flags = gc.get_debug()
+ gc.set_debug(gc.DEBUG_SAVEALL)
+ gc.collect()
+ previous_garbage = len(gc.garbage)
+ f(self)
+ gc.collect()
+ # This will fail if any garbage has been created, typically because of a
+ # reference cycle.
+ self.assertEqual(previous_garbage, len(gc.garbage))
+ # TODO(allenl): Figure out why this debug flag reset doesn't work. It would
+ # be nice to be able to decorate arbitrary tests in a large test suite and
+ # not hold on to every object in other tests.
+ gc.set_debug(previous_debug_flags)
+ gc.enable()
+ return decorator
+
+
+class NoReferenceCycleTests(test_util.TensorFlowTestCase):
+
+ @assert_no_garbage_created
+ def testEagerResourceVariables(self):
+ with context.eager_mode():
+ resource_variable_ops.ResourceVariable(1.0, name="a")
+
+ @assert_no_garbage_created
+ def testTensorArrays(self):
+ with context.eager_mode():
+ ta = tensor_array_ops.TensorArray(
+ dtype=dtypes.float32,
+ tensor_array_name="foo",
+ size=3,
+ infer_shape=False)
+
+ w0 = ta.write(0, [[4.0, 5.0]])
+ w1 = w0.write(1, [[1.0]])
+ w2 = w1.write(2, -3.0)
+
+ r0 = w2.read(0)
+ r1 = w2.read(1)
+ r2 = w2.read(2)
+
+ d0, d1, d2 = self.evaluate([r0, r1, r2])
+ self.assertAllEqual([[4.0, 5.0]], d0)
+ self.assertAllEqual([[1.0]], d1)
+ self.assertAllEqual(-3.0, d2)
+
+
+if __name__ == "__main__":
+ test.main()
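A self-contained illustration of the gc.DEBUG_SAVEALL mechanism the decorator relies on: with the flag set, objects the collector frees are appended to gc.garbage instead of being discarded, so a deliberate reference cycle becomes observable:

```python
import gc

gc.disable()
gc.set_debug(gc.DEBUG_SAVEALL)
gc.collect()
before = len(gc.garbage)

a = []
a.append(a)  # deliberate reference cycle
del a        # now unreachable, collectable only by the cycle detector

gc.collect()
assert len(gc.garbage) > before  # the cycle surfaced in gc.garbage
gc.set_debug(0)
gc.enable()
```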
diff --git a/tensorflow/python/kernel_tests/matrix_exponential_op_test.py b/tensorflow/python/kernel_tests/matrix_exponential_op_test.py
new file mode 100644
index 0000000000..c5a7a3ba99
--- /dev/null
+++ b/tensorflow/python/kernel_tests/matrix_exponential_op_test.py
@@ -0,0 +1,196 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for tensorflow.ops.gen_linalg_ops.matrix_exponential."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import itertools
+import math
+
+import numpy as np
+
+from tensorflow.python.client import session
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import control_flow_ops
+from tensorflow.python.ops import gen_linalg_ops
+from tensorflow.python.ops import random_ops
+from tensorflow.python.ops import variables
+from tensorflow.python.platform import test
+
+
+def np_expm(x):
+ """Slow but accurate Taylor series matrix exponential."""
+ y = np.zeros(x.shape, dtype=x.dtype)
+ xn = np.eye(x.shape[0], dtype=x.dtype)
+ for n in range(40):
+ y += xn / float(math.factorial(n))
+ xn = np.dot(xn, x)
+ return y
+
+
+class ExponentialOpTest(test.TestCase):
+
+ def _verifyExponential(self, x, np_type):
+ # TODO(pfau): add matrix logarithm and test that it is inverse of expm.
+ inp = x.astype(np_type)
+ with self.test_session(use_gpu=True):
+ # Verify that the result matches the slow numpy reference np_expm.
+ tf_ans = gen_linalg_ops._matrix_exponential(inp)
+ if x.size == 0:
+ np_ans = np.empty(x.shape, dtype=np_type)
+ else:
+ if x.ndim > 2:
+ np_ans = np.zeros(inp.shape, dtype=np_type)
+ for i in itertools.product(*[range(x) for x in inp.shape[:-2]]):
+ np_ans[i] = np_expm(inp[i])
+ else:
+ np_ans = np_expm(inp)
+ out = tf_ans.eval()
+ self.assertAllClose(np_ans, out, rtol=1e-4, atol=1e-3)
+
+ def _verifyExponentialReal(self, x):
+ for np_type in [np.float32, np.float64]:
+ self._verifyExponential(x, np_type)
+
+ def _verifyExponentialComplex(self, x):
+ for np_type in [np.complex64, np.complex128]:
+ self._verifyExponential(x, np_type)
+
+ def _makeBatch(self, matrix1, matrix2):
+ matrix_batch = np.concatenate(
+ [np.expand_dims(matrix1, 0),
+ np.expand_dims(matrix2, 0)])
+ matrix_batch = np.tile(matrix_batch, [2, 3, 1, 1])
+ return matrix_batch
+
+ def testNonsymmetric(self):
+ # 2x2 matrices
+ matrix1 = np.array([[1., 2.], [3., 4.]])
+ matrix2 = np.array([[1., 3.], [3., 5.]])
+ self._verifyExponentialReal(matrix1)
+ self._verifyExponentialReal(matrix2)
+ # A multidimensional batch of 2x2 matrices
+ self._verifyExponentialReal(self._makeBatch(matrix1, matrix2))
+ # Complex
+ matrix1 = matrix1.astype(np.complex64)
+ matrix1 += 1j * matrix1
+ matrix2 = matrix2.astype(np.complex64)
+ matrix2 += 1j * matrix2
+ self._verifyExponentialComplex(matrix1)
+ self._verifyExponentialComplex(matrix2)
+ # Complex batch
+ self._verifyExponentialComplex(self._makeBatch(matrix1, matrix2))
+
+ def testSymmetricPositiveDefinite(self):
+ # 2x2 matrices
+ matrix1 = np.array([[2., 1.], [1., 2.]])
+ matrix2 = np.array([[3., -1.], [-1., 3.]])
+ self._verifyExponentialReal(matrix1)
+ self._verifyExponentialReal(matrix2)
+ # A multidimensional batch of 2x2 matrices
+ self._verifyExponentialReal(self._makeBatch(matrix1, matrix2))
+ # Complex
+ matrix1 = matrix1.astype(np.complex64)
+ matrix1 += 1j * matrix1
+ matrix2 = matrix2.astype(np.complex64)
+ matrix2 += 1j * matrix2
+ self._verifyExponentialComplex(matrix1)
+ self._verifyExponentialComplex(matrix2)
+ # Complex batch
+ self._verifyExponentialComplex(self._makeBatch(matrix1, matrix2))
+
+ def testNonSquareMatrix(self):
+ # Attempting the exponential of a non-square matrix should raise an error.
+ with self.assertRaises(ValueError):
+ gen_linalg_ops._matrix_exponential(np.array([[1., 2., 3.], [3., 4., 5.]]))
+
+ def testWrongDimensions(self):
+ # The input to the exponential should be at least a 2-dimensional tensor.
+ tensor3 = constant_op.constant([1., 2.])
+ with self.assertRaises(ValueError):
+ gen_linalg_ops._matrix_exponential(tensor3)
+
+ def testEmpty(self):
+ self._verifyExponentialReal(np.empty([0, 2, 2]))
+ self._verifyExponentialReal(np.empty([2, 0, 0]))
+
+ def testRandomSmallAndLarge(self):
+ np.random.seed(42)
+ for dtype in np.float32, np.float64, np.complex64, np.complex128:
+ for batch_dims in [(), (1,), (3,), (2, 2)]:
+ for size in 8, 31, 32:
+ shape = batch_dims + (size, size)
+ matrix = np.random.uniform(
+ low=-1.0, high=1.0,
+ size=np.prod(shape)).reshape(shape).astype(dtype)
+ self._verifyExponentialReal(matrix)
+
+ def testConcurrentExecutesWithoutError(self):
+ with self.test_session(use_gpu=True) as sess:
+ matrix1 = random_ops.random_normal([5, 5], seed=42)
+ matrix2 = random_ops.random_normal([5, 5], seed=42)
+ expm1 = gen_linalg_ops._matrix_exponential(matrix1)
+ expm2 = gen_linalg_ops._matrix_exponential(matrix2)
+ expm = sess.run([expm1, expm2])
+ self.assertAllEqual(expm[0], expm[1])
+
+
+class MatrixExponentialBenchmark(test.Benchmark):
+
+ shapes = [
+ (4, 4),
+ (10, 10),
+ (16, 16),
+ (101, 101),
+ (256, 256),
+ (1000, 1000),
+ (1024, 1024),
+ (2048, 2048),
+ (513, 4, 4),
+ (513, 16, 16),
+ (513, 256, 256),
+ ]
+
+ def _GenerateMatrix(self, shape):
+ batch_shape = shape[:-2]
+ shape = shape[-2:]
+ assert shape[0] == shape[1]
+ n = shape[0]
+ matrix = np.ones(shape).astype(np.float32) / (
+ 2.0 * n) + np.diag(np.ones(n).astype(np.float32))
+ return variables.Variable(np.tile(matrix, batch_shape + (1, 1)))
+
+ def benchmarkMatrixExponentialOp(self):
+ for shape in self.shapes:
+ with ops.Graph().as_default(), \
+ session.Session() as sess, \
+ ops.device("/cpu:0"):
+ matrix = self._GenerateMatrix(shape)
+ expm = gen_linalg_ops._matrix_exponential(matrix)
+ variables.global_variables_initializer().run()
+ self.run_op_benchmark(
+ sess,
+ control_flow_ops.group(expm),
+ min_iters=25,
+ name="matrix_exponential_cpu_{shape}".format(
+ shape=shape))
+
+
+if __name__ == "__main__":
+ test.main()
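For reference, np_expm above is a 40-term truncation of the defining power series of the matrix exponential:

```latex
e^{X} \;=\; \sum_{n=0}^{\infty} \frac{X^{n}}{n!}
\;=\; I + X + \frac{X^{2}}{2!} + \frac{X^{3}}{3!} + \cdots
```

Truncation suffices here because the test matrices have small entries, so the series converges quickly; production implementations generally prefer scaling-and-squaring with Padé approximants.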
diff --git a/tensorflow/python/kernel_tests/spacetodepth_op_test.py b/tensorflow/python/kernel_tests/spacetodepth_op_test.py
index 4a9353d6bf..3c98a685e0 100644
--- a/tensorflow/python/kernel_tests/spacetodepth_op_test.py
+++ b/tensorflow/python/kernel_tests/spacetodepth_op_test.py
@@ -277,11 +277,15 @@ class SpaceToDepthTest(test.TestCase):
class SpaceToDepthGradientTest(test.TestCase):
# Check the gradients.
- def _checkGrad(self, x, block_size):
+ def _checkGrad(self, x, block_size, data_format):
+ # NCHW is implemented only on GPU.
+ if data_format == "NCHW" and not test.is_gpu_available():
+ return
+
assert 4 == x.ndim
with self.test_session(use_gpu=True):
tf_x = ops.convert_to_tensor(x)
- tf_y = array_ops.space_to_depth(tf_x, block_size)
+ tf_y = array_ops.space_to_depth(tf_x, block_size, data_format=data_format)
epsilon = 1e-2
((x_jacob_t, x_jacob_n)) = gradient_checker.compute_gradient(
tf_x,
@@ -295,23 +299,28 @@ class SpaceToDepthGradientTest(test.TestCase):
# Tests a gradient for space_to_depth of x which is a four dimensional
# tensor of shape [b, h * block_size, w * block_size, d].
- def _compare(self, b, h, w, d, block_size):
+ def _compare(self, b, h, w, d, block_size, data_format):
block_size_sq = block_size * block_size
- x = np.random.normal(0, 1, b * h * w * d *
- block_size_sq).astype(np.float32).reshape(
- [b, h * block_size, w * block_size, d])
+ data = np.random.normal(0, 1, b * h * w * d * block_size_sq).astype(
+ np.float32)
+ if data_format == "NHWC":
+ x = data.reshape([b, h * block_size, w * block_size, d])
+ else:
+ x = data.reshape([b, d, h * block_size, w * block_size])
- self._checkGrad(x, block_size)
+ self._checkGrad(x, block_size, data_format)
# Don't use very large numbers as dimensions here as the result is tensor
# with cartesian product of the dimensions.
def testSmall(self):
block_size = 2
- self._compare(1, 2, 3, 5, block_size)
+ self._compare(1, 2, 3, 5, block_size, "NHWC")
+ self._compare(1, 2, 3, 5, block_size, "NCHW")
def testSmall2(self):
block_size = 2
- self._compare(2, 4, 3, 2, block_size)
+ self._compare(2, 4, 3, 2, block_size, "NHWC")
+ self._compare(2, 4, 3, 2, block_size, "NCHW")
if __name__ == "__main__":
diff --git a/tensorflow/python/kernel_tests/tensor_array_ops_test.py b/tensorflow/python/kernel_tests/tensor_array_ops_test.py
index a1fc6d63d4..0f3b11e7f9 100644
--- a/tensorflow/python/kernel_tests/tensor_array_ops_test.py
+++ b/tensorflow/python/kernel_tests/tensor_array_ops_test.py
@@ -169,18 +169,22 @@ class TensorArrayTest(test.TestCase):
self._testTensorArrayWriteConcat(dtypes.complex128)
self._testTensorArrayWriteConcat(dtypes.string)
- def _testTensorArrayPackNotAllValuesAvailableFails(self):
+ def _testTensorArrayReadOrPackNotAllValuesAvailableFillsZeros(self):
with self.test_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
- dtype=dtypes.float32, tensor_array_name="foo", size=3)
-
- with self.assertRaisesOpError("Could not read from TensorArray index 1 "
- "because it has not yet been written to."):
- self.evaluate(ta.write(0, [[4.0, 5.0]]).stack())
+ dtype=dtypes.float32,
+ tensor_array_name="foo",
+ size=3,
+ element_shape=tensor_shape.TensorShape([1, 2]))
+ self.assertAllEqual([[0.0, 0.0]], self.evaluate(ta.read(0)))
+ self.assertAllEqual([[[0.0, 0.0]], [[4.0, 5.0]], [[0.0, 0.0]]],
+ self.evaluate(ta.write(1, [[4.0, 5.0]]).stack()))
+ self.assertAllEqual([[0.0, 0.0], [4.0, 5.0], [0.0, 0.0]],
+ self.evaluate(ta.write(1, [[4.0, 5.0]]).concat()))
@test_util.run_in_graph_and_eager_modes()
- def testTensorArrayPackNotAllValuesAvailableFails(self):
- self._testTensorArrayPackNotAllValuesAvailableFails()
+ def testTensorArrayReadOrPackNotAllValuesAvailableFillsZeros(self):
+ self._testTensorArrayReadOrPackNotAllValuesAvailableFillsZeros()
def _testTensorArrayUnpackRead(self, tf_dtype):
with self.test_session(use_gpu=True):
@@ -423,12 +427,6 @@ class TensorArrayTest(test.TestCase):
"TensorArray dtype is float but Op requested dtype double."):
r0_bad.eval()
- # Test reading from a different index than the one we wrote to
- with self.assertRaisesOpError(
- "Could not read from TensorArray index 1 because "
- "it has not yet been written to."):
- self.evaluate(w0.read(1))
-
# Test reading from a negative index, which is not allowed
if context.in_graph_mode():
with self.assertRaisesOpError(
diff --git a/tensorflow/python/ops/array_grad.py b/tensorflow/python/ops/array_grad.py
index 7e632c75e8..3c025881cb 100644
--- a/tensorflow/python/ops/array_grad.py
+++ b/tensorflow/python/ops/array_grad.py
@@ -641,14 +641,22 @@ def _BatchToSpaceNDGrad(op, grad):
def _SpaceToDepthGrad(op, grad):
# Its gradient is the opposite op: DepthToSpace.
block_size = op.get_attr("block_size")
- return array_ops.depth_to_space(grad, block_size)
+ data_format = op.get_attr("data_format")
+ if data_format == "NCHW_VECT_C":
+ raise ValueError("Cannot compute SpaceToDepth gradient with NCHW_VECT_C. "
+ "NCHW_VECT_C requires qint8 data type.")
+ return array_ops.depth_to_space(grad, block_size, data_format=data_format)
@ops.RegisterGradient("DepthToSpace")
def _DepthToSpaceGrad(op, grad):
# Its gradient is the opposite op: SpaceToDepth.
block_size = op.get_attr("block_size")
- return array_ops.space_to_depth(grad, block_size)
+ data_format = op.get_attr("data_format")
+ if data_format == "NCHW_VECT_C":
+ raise ValueError("Cannot compute DepthToSpace gradient with NCHW_VECT_C. "
+ "NCHW_VECT_C requires qint8 data type.")
+ return array_ops.space_to_depth(grad, block_size, data_format=data_format)
ops.NotDifferentiable("OneHot")
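The two gradient registrations rely on SpaceToDepth and DepthToSpace being mutually inverse element permutations, so backpropagating through one op amounts to applying the other to the incoming gradient. A NumPy sketch of the NHWC case (our own reimplementation, for illustration; it mirrors but does not call the TF kernels):

```python
import numpy as np

def space_to_depth_nhwc(x, block):
    b, h, w, d = x.shape
    x = x.reshape(b, h // block, block, w // block, block, d)
    x = x.transpose(0, 1, 3, 2, 4, 5)  # gather each block into the depth axis
    return x.reshape(b, h // block, w // block, d * block * block)

def depth_to_space_nhwc(x, block):
    b, h, w, d = x.shape
    x = x.reshape(b, h, w, block, block, d // (block * block))
    x = x.transpose(0, 1, 3, 2, 4, 5)  # scatter depth back into spatial blocks
    return x.reshape(b, h * block, w * block, d // (block * block))

x = np.arange(2 * 4 * 4 * 3, dtype=np.float32).reshape(2, 4, 4, 3)
roundtrip = depth_to_space_nhwc(space_to_depth_nhwc(x, 2), 2)
np.testing.assert_array_equal(roundtrip, x)  # the two ops undo each other
```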
diff --git a/tensorflow/python/ops/distributions/special_math.py b/tensorflow/python/ops/distributions/special_math.py
index 6b38a4958e..222a39ad82 100644
--- a/tensorflow/python/ops/distributions/special_math.py
+++ b/tensorflow/python/ops/distributions/special_math.py
@@ -197,9 +197,10 @@ def _ndtri(p):
# Write in an arbitrary value in place of 0 for p since 0 will cause NaNs
# later on. The result from the computation when p == 0 is not used so any
# number that doesn't result in NaNs is fine.
+ one_half = constant_op.constant(0.5, dtype=p.dtype)
sanitized_mcp = array_ops.where(
maybe_complement_p <= 0.,
- 0.5 * array_ops.ones_like(p),
+ array_ops.fill(array_ops.shape(p), one_half),
maybe_complement_p)
# Compute x for p > exp(-2): x/sqrt(2pi) = w + w**3 P0(w**2)/Q0(w**2).
@@ -226,7 +227,8 @@ def _ndtri(p):
array_ops.where(z >= 8.0, x_for_small_p, x_otherwise))
x = array_ops.where(p > 1. - np.exp(-2.), x, -x)
- infinity = constant_op.constant(np.inf, dtype=x.dtype) * array_ops.ones_like(x)
+ infinity_scalar = constant_op.constant(np.inf, dtype=p.dtype)
+ infinity = array_ops.fill(array_ops.shape(p), infinity_scalar)
x_nan_replaced = array_ops.where(
p <= 0.0, -infinity, array_ops.where(p >= 1.0, infinity, x))
return x_nan_replaced
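Both hunks above swap `scalar * ones_like(...)` for `fill(shape(p), scalar)`, building the constant-valued tensor directly from the runtime shape; this pairs with the new testNdtriDynamicShape placeholder test. A minimal TF 1.x sketch of the pattern:

```python
import tensorflow as tf

p = tf.placeholder(tf.float32)  # shape unknown until run time
half = tf.fill(tf.shape(p), tf.constant(0.5, dtype=p.dtype))

with tf.Session() as sess:
    print(sess.run(half, feed_dict={p: [[1., 2.], [3., 4.]]}))
    # [[0.5 0.5]
    #  [0.5 0.5]]
```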
diff --git a/tensorflow/python/ops/hidden_ops.txt b/tensorflow/python/ops/hidden_ops.txt
index 732ab8f15a..a0fff9e16c 100644
--- a/tensorflow/python/ops/hidden_ops.txt
+++ b/tensorflow/python/ops/hidden_ops.txt
@@ -223,6 +223,7 @@ BatchSelfAdjointEig
BatchSelfAdjointEigV2
BatchSvd
LogMatrixDeterminant
+MatrixExponential
MatrixSolveLs
SelfAdjointEig
SelfAdjointEigV2
diff --git a/tensorflow/python/ops/linalg/linalg_impl.py b/tensorflow/python/ops/linalg/linalg_impl.py
index 04a15e3e5b..bf15f0e2e5 100644
--- a/tensorflow/python/ops/linalg/linalg_impl.py
+++ b/tensorflow/python/ops/linalg/linalg_impl.py
@@ -38,6 +38,7 @@ diag_part = array_ops.matrix_diag_part
eigh = linalg_ops.self_adjoint_eig
eigvalsh = linalg_ops.self_adjoint_eigvals
einsum = special_math_ops.einsum
+expm = gen_linalg_ops._matrix_exponential
eye = linalg_ops.eye
inv = linalg_ops.matrix_inverse
lstsq = linalg_ops.matrix_solve_ls
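With the op wired into the linalg namespace, a quick sanity check is easy: the exponential of a diagonal matrix just exponentiates its diagonal entries. A hedged sketch, assuming the symbol surfaces as tf.linalg.expm (as it does in later releases):

```python
import numpy as np
import tensorflow as tf

x = tf.constant(np.diag([0.0, 1.0]))  # float64 inferred from numpy
with tf.Session() as sess:
    print(sess.run(tf.linalg.expm(x)))
    # approximately [[1.       0.      ]
    #                [0.       2.718282]]
```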
diff --git a/tensorflow/python/ops/math_ops.py b/tensorflow/python/ops/math_ops.py
index 9db4b0d8cc..578778f1f3 100644
--- a/tensorflow/python/ops/math_ops.py
+++ b/tensorflow/python/ops/math_ops.py
@@ -89,6 +89,7 @@ See the @{$python/math_ops} guide.
@@matrix_inverse
@@cholesky
@@cholesky_solve
+@@matrix_exponential
@@matrix_solve
@@matrix_triangular_solve
@@matrix_solve_ls
diff --git a/tensorflow/python/ops/resource_variable_ops.py b/tensorflow/python/ops/resource_variable_ops.py
index 9e5bb4a225..a746735f58 100644
--- a/tensorflow/python/ops/resource_variable_ops.py
+++ b/tensorflow/python/ops/resource_variable_ops.py
@@ -63,7 +63,7 @@ def _eager_safe_variable_handle(shape, dtype, shared_name, name, graph_mode):
raise ValueError("variable object with name '%s' already created. Use "
"get_variable() if reuse is desired." %
shared_name)
- with context.graph_mode(), ops.Graph().as_default():
+ with context.graph_mode(), ops.Graph().as_default() as graph:
h = gen_resource_variable_ops.var_handle_op(shape=shape, dtype=dtype,
shared_name=shared_name,
name=name,
@@ -74,6 +74,25 @@ def _eager_safe_variable_handle(shape, dtype, shared_name, name, graph_mode):
# shape inference doesn't run in eager mode we copy this data here for when
# the handle is captured by an eager mode function.
handle._handle_data = h._handle_data # pylint: disable=protected-access
+ # Clean up our reference cycles to avoid making the garbage collector run.
+ # pylint: disable=protected-access
+ # OrderedDict, constructed on Graph creation, makes a simple reference loop
+ # and hides it in a name-mangled attribute in some Python versions. We don't need to
+ # throw an error if we can't find it, but if we do find it we can break the
+ # loop to avoid creating work for the garbage collector.
+ problematic_cycle = graph._functions.__dict__.get("_OrderedDict__root", None)
+ # pylint: enable=protected-access
+ if problematic_cycle:
+ try:
+ del problematic_cycle[0][:]
+ except TypeError:
+ # This is probably not one of the problematic Python versions. Continue
+ # with the rest of our cleanup.
+ pass
+ # Now clean up our own reference cycles by clearing all of the attributes for
+ # the Graph and op we created.
+ h.__dict__ = {}
+ graph.__dict__ = {}
return handle
@@ -454,6 +473,7 @@ class ResourceVariable(variables.Variable):
ops.add_to_collections(collections, self)
elif ops.GraphKeys.GLOBAL_STEP in collections:
ops.add_to_collections(ops.GraphKeys.GLOBAL_STEP, self)
+
if not self._in_graph_mode:
# After the handle has been created, set up a way to clean it up when
# executing eagerly. We'll hold the only reference to the deleter, so that
diff --git a/tensorflow/python/ops/tensor_array_ops.py b/tensorflow/python/ops/tensor_array_ops.py
index b4b7ad9d91..ea5354c1d6 100644
--- a/tensorflow/python/ops/tensor_array_ops.py
+++ b/tensorflow/python/ops/tensor_array_ops.py
@@ -593,10 +593,7 @@ class _EagerTensorArray(object):
"a previous read (perhaps try setting clear_after_read = false?)" %
index)
else:
- raise errors_impl.InvalidArgumentError(
- None, None,
- "Could not read from TensorArray index %d because it has not yet "
- "been written to." % index)
+ tensor = self._maybe_zero(index)
if self._clear_after_read:
self._tensor_array[index] = None
@@ -610,52 +607,36 @@ class _EagerTensorArray(object):
_eager_write_no_copy(ta._implementation, index, value) # pylint: disable=protected-access
return ta
+ def _maybe_zero(self, ix):
+ val = self._tensor_array[ix]
+ if val is None:
+ val = self._tensor_array[ix] = array_ops.zeros(
+ shape=self._element_shape, dtype=self._dtype)
+ return val
+
def stack(self, name=None):
"""See TensorArray."""
- try:
- return array_ops.stack(self._tensor_array, name=name)
- except ValueError:
- if None in self._tensor_array:
- idx = self._tensor_array.index(None)
- raise errors_impl.InvalidArgumentError(
- None, None, "Could not read from TensorArray index %d because "
- "it has not yet been written to." % idx)
- else:
- raise
+ if self._tensor_array:
+ for ix in range(len(self._tensor_array)):
+ self._maybe_zero(ix)
+ return array_ops.stack(self._tensor_array, name=name)
def gather(self, indices, name=None):
"""See TensorArray."""
del name # not meaningful in Eager mode
- return array_ops.stack([self._tensor_array[i] for i in indices.numpy()])
+ return array_ops.stack([self._maybe_zero(i) for i in indices.numpy()])
def concat(self, name=None):
"""See TensorArray."""
try:
- return array_ops.concat(self._tensor_array, 0, name=name)
+ return array_ops.concat(
+ [self._maybe_zero(ix) for ix in range(len(self._tensor_array))],
+ 0, name=name)
except errors_impl.OpError:
# Reproduce a subset of the error-handling for graph-mode TensorArrays.
shapes = [t.shape for t in self._tensor_array]
ndims = [s.ndims for s in shapes]
- if None in self._tensor_array:
- # Concatenating empty TensorArrays is permitted if the element
- # shape is defined; the output is a tensor with shape
- # [0] + self._element_shape[1:]
- if all(t is None for t in self._tensor_array):
- if self._element_shape is not None:
- return constant_op.constant([], shape=[0] + self._element_shape[1:])
- else:
- raise errors_impl.UnimplementedError(
- None, None, "TensorArray has size zero, but "
- "element_shape_except0 %s is not fully defined. Currently only "
- "static shapes are supported when concatenating zero-size "
- "TensorArrays." % self._element_shape[1:])
- # Concatenating a TensorArray in which some but not all entries have
- # been written to is not allowed.
- idx = self._tensor_array.index(None)
- raise errors_impl.InvalidArgumentError(
- None, None, "Could not read from TensorArray index %d because "
- "it has not yet been written to." % idx)
- elif 0 in ndims:
+ if 0 in ndims:
idx = ndims.index(0)
raise errors_impl.InvalidArgumentError(
None, None, "Concat saw a scalar shape at index %d but requires "
diff --git a/tensorflow/python/platform/app.py b/tensorflow/python/platform/app.py
index 5ecaa1baaf..c01e1c9b1a 100644
--- a/tensorflow/python/platform/app.py
+++ b/tensorflow/python/platform/app.py
@@ -18,6 +18,7 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+import errno as _errno
import sys as _sys
from tensorflow.python.platform import flags
@@ -28,24 +29,108 @@ def _benchmark_tests_can_log_memory():
return True
+def _usage(shorthelp):
+ """Writes __main__'s docstring to stdout with some help text.
+
+ Args:
+ shorthelp: bool, if True, prints only flags from the main module,
+ rather than all flags.
+ """
+ doc = _sys.modules['__main__'].__doc__
+ if not doc:
+ doc = '\nUSAGE: %s [flags]\n' % _sys.argv[0]
+ doc = flags.text_wrap(doc, indent=' ', firstline_indent='')
+ else:
+ # Replace all '%s' with sys.argv[0], and all '%%' with '%'.
+ num_specifiers = doc.count('%') - 2 * doc.count('%%')
+ try:
+ doc %= (_sys.argv[0],) * num_specifiers
+ except (OverflowError, TypeError, ValueError):
+ # Just display the docstring as-is.
+ pass
+ if shorthelp:
+ flag_str = flags.FLAGS.main_module_help()
+ else:
+ flag_str = str(flags.FLAGS)
+ try:
+ _sys.stdout.write(doc)
+ if flag_str:
+ _sys.stdout.write('\nflags:\n')
+ _sys.stdout.write(flag_str)
+ _sys.stdout.write('\n')
+ except IOError as e:
+ # We avoid printing a huge backtrace if we get EPIPE, because
+ # "foo.par --help | less" is a frequent use case.
+ if e.errno != _errno.EPIPE:
+ raise
+
+
+class _HelpFlag(flags.BooleanFlag):
+ """Special boolean flag that displays usage and raises SystemExit."""
+ NAME = 'help'
+ SHORT_NAME = 'h'
+
+ def __init__(self):
+ super(_HelpFlag, self).__init__(
+ self.NAME, False, 'show this help', short_name=self.SHORT_NAME)
+
+ def parse(self, arg):
+ if arg:
+ _usage(shorthelp=True)
+ print()
+ print('Try --helpfull to get a list of all flags.')
+ _sys.exit(1)
+
+
+class _HelpshortFlag(_HelpFlag):
+ """--helpshort is an alias for --help."""
+ NAME = 'helpshort'
+ SHORT_NAME = None
+
+
+class _HelpfullFlag(flags.BooleanFlag):
+ """Display help for flags in main module and all dependent modules."""
+
+ def __init__(self):
+ super(_HelpfullFlag, self).__init__('helpfull', False, 'show full help')
+
+ def parse(self, arg):
+ if arg:
+ _usage(shorthelp=False)
+ _sys.exit(1)
+
+
+_define_help_flags_called = False
+
+
+def _define_help_flags():
+ global _define_help_flags_called
+ if not _define_help_flags_called:
+ flags.DEFINE_flag(_HelpFlag())
+ flags.DEFINE_flag(_HelpfullFlag())
+ flags.DEFINE_flag(_HelpshortFlag())
+ _define_help_flags_called = True
+
+
def run(main=None, argv=None):
"""Runs the program with an optional 'main' function and 'argv' list."""
- f = flags.FLAGS
- # Extract the args from the optional `argv` list.
- args = argv[1:] if argv else None
+ # Define help flags.
+ _define_help_flags()
- # Parse the known flags from that list, or from the command
- # line otherwise.
- # pylint: disable=protected-access
- flags_passthrough = f._parse_flags(args=args)
- # pylint: enable=protected-access
+ # Parse flags.
+ try:
+ argv = flags.FLAGS(_sys.argv if argv is None else argv)
+ except flags.Error as error:
+ _sys.stderr.write('FATAL Flags parsing error: %s\n' % error)
+ _sys.stderr.write('Pass --helpshort or --helpfull to see help on flags.\n')
+ _sys.exit(1)
main = main or _sys.modules['__main__'].main
# Call the main function, passing through any arguments
# to the final program.
- _sys.exit(main(_sys.argv[:1] + flags_passthrough))
+ _sys.exit(main(argv))
_allowed_symbols = [
diff --git a/tensorflow/python/platform/flags.py b/tensorflow/python/platform/flags.py
index 138a0ced97..e9a36ae75d 100644
--- a/tensorflow/python/platform/flags.py
+++ b/tensorflow/python/platform/flags.py
@@ -13,199 +13,10 @@
# limitations under the License.
# ==============================================================================
-"""Implementation of the flags interface."""
+"""Import router for absl.flags. See https://github.com/abseil/abseil-py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-import argparse as _argparse
-
-from tensorflow.python.platform import tf_logging as _logging
-from tensorflow.python.util.all_util import remove_undocumented
-
-_global_parser = _argparse.ArgumentParser()
-
-
-# pylint: disable=invalid-name
-
-
-class _FlagValues(object):
- """Global container and accessor for flags and their values."""
-
- def __init__(self):
- self.__dict__['__flags'] = {}
- self.__dict__['__parsed'] = False
- self.__dict__['__required_flags'] = set()
-
- def _parse_flags(self, args=None):
- result, unparsed = _global_parser.parse_known_args(args=args)
- for flag_name, val in vars(result).items():
- self.__dict__['__flags'][flag_name] = val
- self.__dict__['__parsed'] = True
- self._assert_all_required()
- return unparsed
-
- def __getattr__(self, name):
- """Retrieves the 'value' attribute of the flag --name."""
- try:
- parsed = self.__dict__['__parsed']
- except KeyError:
- # May happen during pickle.load or copy.copy
- raise AttributeError(name)
- if not parsed:
- self._parse_flags()
- if name not in self.__dict__['__flags']:
- raise AttributeError(name)
- return self.__dict__['__flags'][name]
-
- def __setattr__(self, name, value):
- """Sets the 'value' attribute of the flag --name."""
- if not self.__dict__['__parsed']:
- self._parse_flags()
- self.__dict__['__flags'][name] = value
- self._assert_required(name)
-
- def _add_required_flag(self, item):
- self.__dict__['__required_flags'].add(item)
-
- def _assert_required(self, flag_name):
- if (flag_name not in self.__dict__['__flags'] or
- self.__dict__['__flags'][flag_name] is None):
- raise AttributeError('Flag --%s must be specified.' % flag_name)
-
- def _assert_all_required(self):
- for flag_name in self.__dict__['__required_flags']:
- self._assert_required(flag_name)
-
-
-def _define_helper(flag_name, default_value, docstring, flagtype):
- """Registers 'flag_name' with 'default_value' and 'docstring'."""
- _global_parser.add_argument('--' + flag_name,
- default=default_value,
- help=docstring,
- type=flagtype)
-
-
-# Provides the global object that can be used to access flags.
-FLAGS = _FlagValues()
-
-
-def DEFINE_string(flag_name, default_value, docstring):
- """Defines a flag of type 'string'.
-
- Args:
- flag_name: The name of the flag as a string.
- default_value: The default value the flag should take as a string.
- docstring: A helpful message explaining the use of the flag.
- """
- _define_helper(flag_name, default_value, docstring, str)
-
-
-def DEFINE_integer(flag_name, default_value, docstring):
- """Defines a flag of type 'int'.
-
- Args:
- flag_name: The name of the flag as a string.
- default_value: The default value the flag should take as an int.
- docstring: A helpful message explaining the use of the flag.
- """
- _define_helper(flag_name, default_value, docstring, int)
-
-
-def DEFINE_boolean(flag_name, default_value, docstring):
- """Defines a flag of type 'boolean'.
-
- Args:
- flag_name: The name of the flag as a string.
- default_value: The default value the flag should take as a boolean.
- docstring: A helpful message explaining the use of the flag.
- """
- # Register a custom function for 'bool' so --flag=True works.
- def str2bool(v):
- return v.lower() in ('true', 't', '1')
- _global_parser.add_argument('--' + flag_name,
- nargs='?',
- const=True,
- help=docstring,
- default=default_value,
- type=str2bool)
-
- # Add negated version, stay consistent with argparse with regard to
- # dashes in flag names.
- _global_parser.add_argument('--no' + flag_name,
- action='store_false',
- dest=flag_name.replace('-', '_'))
-
-
-# The internal google library defines the following alias, so we match
-# the API for consistency.
-DEFINE_bool = DEFINE_boolean # pylint: disable=invalid-name
-
-
-def DEFINE_float(flag_name, default_value, docstring):
- """Defines a flag of type 'float'.
-
- Args:
- flag_name: The name of the flag as a string.
- default_value: The default value the flag should take as a float.
- docstring: A helpful message explaining the use of the flag.
- """
- _define_helper(flag_name, default_value, docstring, float)
-
-
-def mark_flag_as_required(flag_name):
- """Ensures that flag is not None during program execution.
-
- It is recommended to call this method like this:
-
- if __name__ == '__main__':
- tf.flags.mark_flag_as_required('your_flag_name')
- tf.app.run()
-
- Args:
- flag_name: string, name of the flag to mark as required.
-
- Raises:
- AttributeError: if flag_name is not registered as a valid flag name.
- NOTE: The exception raised will change in the future.
- """
- if _global_parser.get_default(flag_name) is not None:
- _logging.warn(
- 'Flag %s has a non-None default value; therefore, '
- 'mark_flag_as_required will pass even if flag is not specified in the '
- 'command line!' % flag_name)
- FLAGS._add_required_flag(flag_name)
-
-
-def mark_flags_as_required(flag_names):
- """Ensures that flags are not None during program execution.
-
- Recommended usage:
-
- if __name__ == '__main__':
- tf.flags.mark_flags_as_required(['flag1', 'flag2', 'flag3'])
- tf.app.run()
-
- Args:
- flag_names: a list/tuple of flag names to mark as required.
-
- Raises:
- AttributeError: If any of flag name has not already been defined as a flag.
- NOTE: The exception raised will change in the future.
- """
- for flag_name in flag_names:
- mark_flag_as_required(flag_name)
-
-
-_allowed_symbols = [
- # We rely on gflags documentation.
- 'DEFINE_bool',
- 'DEFINE_boolean',
- 'DEFINE_float',
- 'DEFINE_integer',
- 'DEFINE_string',
- 'FLAGS',
- 'mark_flag_as_required',
- 'mark_flags_as_required',
-]
-remove_undocumented(__name__, _allowed_symbols)
+# go/tf-wildcard-import
+from absl.flags import * # pylint: disable=wildcard-import
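Since tensorflow.python.platform.flags is now a re-export of absl.flags, callers get the absl API directly. A small usage sketch (flag name and message are illustrative):

```python
import sys

from absl import flags

flags.DEFINE_string('name', 'world', 'Who to greet.')
FLAGS = flags.FLAGS

FLAGS(sys.argv)  # absl flags are parsed explicitly; app.run() does this too
print('Hello, %s!' % FLAGS.name)
```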
diff --git a/tensorflow/python/platform/flags_test.py b/tensorflow/python/platform/flags_test.py
index 7b08c3f8a6..23060e17d2 100644
--- a/tensorflow/python/platform/flags_test.py
+++ b/tensorflow/python/platform/flags_test.py
@@ -12,108 +12,23 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-"""Tests for our flags implementation."""
+"""Sanity tests for tf.flags."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-import copy
-import sys
import unittest
-from tensorflow.python.platform import app
-from tensorflow.python.platform import flags
-
-flags.DEFINE_string("string_foo", "default_val", "HelpString")
-flags.DEFINE_integer("int_foo", 42, "HelpString")
-flags.DEFINE_float("float_foo", 42.0, "HelpString")
+from absl import flags as absl_flags
-flags.DEFINE_boolean("bool_foo", True, "HelpString")
-flags.DEFINE_boolean("bool_negation", True, "HelpString")
-flags.DEFINE_boolean("bool-dash-negation", True, "HelpString")
-flags.DEFINE_boolean("bool_a", False, "HelpString")
-flags.DEFINE_boolean("bool_c", False, "HelpString")
-flags.DEFINE_boolean("bool_d", True, "HelpString")
-flags.DEFINE_bool("bool_e", True, "HelpString")
-flags.DEFINE_string("string_foo_required", "default_val", "HelpString")
-flags.DEFINE_string("none_string_foo_required", None, "HelpString")
-
-FLAGS = flags.FLAGS
+from tensorflow.python.platform import flags
class FlagsTest(unittest.TestCase):
- def testString(self):
- res = FLAGS.string_foo
- self.assertEqual(res, "default_val")
- FLAGS.string_foo = "bar"
- self.assertEqual("bar", FLAGS.string_foo)
-
- def testBool(self):
- res = FLAGS.bool_foo
- self.assertTrue(res)
- FLAGS.bool_foo = False
- self.assertFalse(FLAGS.bool_foo)
-
- def testBoolCommandLines(self):
- # Specified on command line with no args, sets to True,
- # even if default is False.
- self.assertEqual(True, FLAGS.bool_a)
-
- # --no before the flag forces it to False, even if the
- # default is True
- self.assertEqual(False, FLAGS.bool_negation)
-
- # --bool_flag=True sets to True
- self.assertEqual(True, FLAGS.bool_c)
-
- # --bool_flag=False sets to False
- self.assertEqual(False, FLAGS.bool_d)
-
- def testInt(self):
- res = FLAGS.int_foo
- self.assertEquals(res, 42)
- FLAGS.int_foo = -1
- self.assertEqual(-1, FLAGS.int_foo)
-
- def testFloat(self):
- res = FLAGS.float_foo
- self.assertEquals(42.0, res)
- FLAGS.float_foo = -1.0
- self.assertEqual(-1.0, FLAGS.float_foo)
-
- def test_copy(self):
- copied = copy.copy(FLAGS)
- self.assertEqual(copied.__dict__, FLAGS.__dict__)
-
- def testStringRequired(self):
- res = FLAGS.string_foo_required
- self.assertEqual(res, "default_val")
- FLAGS.string_foo_required = "bar"
- self.assertEqual("bar", FLAGS.string_foo_required)
-
- def testNoneStringRequired(self):
- res = FLAGS.none_string_foo_required
- self.assertEqual(res, "default_val")
- FLAGS.none_string_foo_required = "bar"
- self.assertEqual("bar", FLAGS.none_string_foo_required)
-
-
-def main(_):
- # unittest.main() tries to interpret the unknown flags, so use the
- # direct functions instead.
- runner = unittest.TextTestRunner()
- itersuite = unittest.TestLoader().loadTestsFromTestCase(FlagsTest)
- runner.run(itersuite)
+ def test_global_flags_object(self):
+ self.assertIs(flags.FLAGS, absl_flags.FLAGS)
if __name__ == "__main__":
- # Test command lines
- sys.argv.extend([
- "--bool_a", "--nobool_negation", "--bool_c=True", "--bool_d=False",
- "--none_string_foo_required=default_val",
- "and_argument"
- ])
- flags.mark_flag_as_required('string_foo_required')
- flags.mark_flags_as_required(['none_string_foo_required'])
- app.run()
+ unittest.main()
diff --git a/tensorflow/python/tools/BUILD b/tensorflow/python/tools/BUILD
index 50bffd1474..69586c6a47 100644
--- a/tensorflow/python/tools/BUILD
+++ b/tensorflow/python/tools/BUILD
@@ -151,7 +151,6 @@ py_library(
srcs = ["optimize_for_inference_lib.py"],
srcs_version = "PY2AND3",
deps = [
- ":strip_unused",
":strip_unused_lib",
"//tensorflow/core:protos_all_py",
"//tensorflow/python:framework",
diff --git a/tensorflow/python/training/checkpoint_ops.py b/tensorflow/python/training/checkpoint_ops.py
index 0769ccd3d1..7f92d94d2b 100644
--- a/tensorflow/python/training/checkpoint_ops.py
+++ b/tensorflow/python/training/checkpoint_ops.py
@@ -36,6 +36,7 @@ def _load_and_remap_matrix(ckpt_path,
num_rows_to_load,
new_col_vocab_size,
initializer,
+ old_row_vocab_size=-1,
old_row_vocab_file=None,
new_row_vocab_file=None,
old_col_vocab_file=None,
@@ -75,6 +76,12 @@ def _load_and_remap_matrix(ckpt_path,
initializer: Callable initializer function that accepts a 1-D tensor as the
arg to specify the shape of the returned tensor. Used to initialize
missing values.
+ old_row_vocab_size: The number of entries to consider in the old vocabulary.
+ With the default value of -1, the entire old row vocabulary file will be
+ used. Otherwise, only the first `old_row_vocab_size` entries will be
+ considered for remapping. Must be smaller than the length of
+ `old_row_vocab_file`. NOTE: we do not provide an equivalent
+ `old_col_vocab_size` for classes.
old_row_vocab_file: A scalar `Tensor` of type `string` containing the
path to the old row vocabulary file. Can be None, which represents no
remapping on the row axis.
@@ -146,7 +153,8 @@ def _load_and_remap_matrix(ckpt_path,
new_vocab_file=new_row_vocab_file,
old_vocab_file=old_row_vocab_file,
new_vocab_offset=new_row_vocab_offset,
- num_new_vocab=num_rows_to_load))
+ num_new_vocab=num_rows_to_load,
+ old_vocab_size=old_row_vocab_size))
else:
# Even when the rows are not being reordered, we still need to generate a
# remapping to account for initializing partitioned Variables (when
@@ -199,6 +207,7 @@ def _load_and_remap_matrix_initializer(ckpt_path,
old_tensor_name,
new_row_vocab_size,
new_col_vocab_size,
+ old_row_vocab_size=-1,
old_row_vocab_file=None,
new_row_vocab_file=None,
old_col_vocab_file=None,
@@ -280,6 +289,12 @@ def _load_and_remap_matrix_initializer(ckpt_path,
`new_col_vocab_file`. If no column remapping is needed (no column vocab
provided), this should be equal to the number of columns in the old
matrix.
+ old_row_vocab_size: The number of entries to consider in the old vocabulary.
+ With the default value of -1, the entire old row vocabulary file will be
+ used. Otherwise, only the first `old_row_vocab_size` entries will be
+ considered for remapping. Must be smaller than the length of
+ `old_row_vocab_file`. NOTE: we do not provide an equivalent
+ `old_col_vocab_size` for classes.
old_row_vocab_file: A scalar `Tensor` of type `string` containing the
path to the old row vocabulary file. Can be None, which represents no
remapping on the row axis.
@@ -388,6 +403,7 @@ def _load_and_remap_matrix_initializer(ckpt_path,
num_rows_to_load=num_rows_to_load,
new_col_vocab_size=new_col_vocab_size,
initializer=initializer,
+ old_row_vocab_size=old_row_vocab_size,
old_row_vocab_file=old_row_vocab_file,
new_row_vocab_file=new_row_vocab_file,
old_col_vocab_file=old_col_vocab_file,
@@ -405,6 +421,7 @@ def _load_embedding_initializer(ckpt_path,
embedding_dim,
old_vocab_file,
new_vocab_file,
+ old_vocab_size=-1,
num_oov_buckets=0,
initializer=None,
max_rows_in_memory=-1):
@@ -428,6 +445,11 @@ def _load_embedding_initializer(ckpt_path,
path to the old vocabulary file.
new_vocab_file: A scalar `Tensor` of type `string` containing the
path to the new vocabulary file.
+ old_vocab_size: The number of entries to consider in the old vocabulary.
+ With the default value of -1, the entire old vocabulary file will be
+ used. Otherwise, only the first `old_vocab_size` entries will be
+ considered for remapping. Must be smaller than the length of
+ `old_vocab_file`.
num_oov_buckets: `int` specifying the number of out-of-vocabulary
buckets to use. Must be >= 0.
initializer: Initializer function that accepts a 1-D tensor as the arg to
@@ -452,6 +474,7 @@ def _load_embedding_initializer(ckpt_path,
old_tensor_name=embedding_tensor_name,
new_row_vocab_size=new_vocab_size,
new_col_vocab_size=embedding_dim,
+ old_row_vocab_size=old_vocab_size,
old_row_vocab_file=old_vocab_file,
new_row_vocab_file=new_vocab_file,
old_col_vocab_file=None,
diff --git a/tensorflow/python/training/checkpoint_ops_test.py b/tensorflow/python/training/checkpoint_ops_test.py
index b578dde251..00611de862 100644
--- a/tensorflow/python/training/checkpoint_ops_test.py
+++ b/tensorflow/python/training/checkpoint_ops_test.py
@@ -103,7 +103,7 @@ class LoadAndRemapWrappersTest(test.TestCase):
num_col_oov_buckets=1)
# [4 in vocab + 1 oov features, 4 in vocab + 1 oov classes]. The offset
- # means we read
+ # means we read from the first line.
expected_remapped_matrix = np.concatenate(
[
np.reshape([18, 34, 50, self.init_val, self.init_val], [5, 1]),
@@ -132,6 +132,9 @@ class LoadAndRemapWrappersTest(test.TestCase):
num_col_oov_buckets=1,
initializer=self.initializer))
+ # The new weight matrix is of size
+ # [5 feature vocab + 1 feature OOV, 4 class vocab + 1 class OOV]. Use a
+ # partitioned variable to confirm that the offset logic works.
expected_remapped_matrix = np.concatenate(
[
np.reshape([2, 18, 34, 50, self.init_val, self.init_val], [6, 1]),
@@ -141,10 +144,6 @@ class LoadAndRemapWrappersTest(test.TestCase):
np.reshape([self.init_val] * 6, [6, 1])
],
axis=1)
-
- # The new weight matrix is of size
- # [5 feature vocab + 1 feature OOV, 4 class vocab + 1 class OOV]. Use a
- # partitioned variable to confirm that the offset logic works.
remapped_matrix = variable_scope.get_variable(
name='linear/obtained_weight_matrix',
shape=[6, 5],
@@ -168,6 +167,8 @@ class LoadAndRemapWrappersTest(test.TestCase):
num_col_oov_buckets=1,
initializer=self.initializer))
+ # The new weight matrix is of size
+ # [5-sized input layer, 4 class vocab + 1 class OOV].
expected_remapped_matrix = np.concatenate(
[
np.reshape([2, 18, 34, 50, 66], [5, 1]),
@@ -177,9 +178,6 @@ class LoadAndRemapWrappersTest(test.TestCase):
np.reshape([self.init_val] * 5, [5, 1])
],
axis=1)
-
- # The new weight matrix is of size
- # [5-sized input layer, 4 class vocab + 1 class OOV].
remapped_matrix = variable_scope.get_variable(
name='dnn_output/obtained_weight_matrix',
shape=[5, 5],
@@ -206,6 +204,9 @@ class LoadAndRemapWrappersTest(test.TestCase):
num_col_oov_buckets=1,
initializer=self.initializer))
+ # The new weight matrix is of size
+ # [5 feature vocab + 5 feature OOV, 4 class vocab + 1 class OOV]. The
+ # second partition has only OOV.
expected_remapped_matrix = np.concatenate(
[
np.reshape([2, 18, 34, 50] + [self.init_val] * 6, [10, 1]),
@@ -215,10 +216,6 @@ class LoadAndRemapWrappersTest(test.TestCase):
np.reshape([self.init_val] * 10, [10, 1]),
],
axis=1)
-
- # The new weight matrix is of size
- # [5 feature vocab + 5 feature OOV, 4 class vocab + 1 class OOV]. The
- # second partition has only OOV.
remapped_matrix = variable_scope.get_variable(
name='linear_all_oov/obtained_weight_matrix',
shape=[10, 5],
@@ -244,6 +241,8 @@ class LoadAndRemapWrappersTest(test.TestCase):
num_row_oov_buckets=1,
num_col_oov_buckets=1))
+ # Same as test_initializer_with_oov_only_partition, but with zero
+ # initialization.
expected_remapped_matrix = np.concatenate(
[
np.reshape([2, 18, 34, 50, 0, 0], [6, 1]),
@@ -253,7 +252,6 @@ class LoadAndRemapWrappersTest(test.TestCase):
np.reshape([0] * 6, [6, 1])
],
axis=1)
-
remapped_matrix = variable_scope.get_variable(
name='linear_init_fallback/obtained_weight_matrix',
shape=[6, 5],
@@ -277,18 +275,17 @@ class LoadAndRemapWrappersTest(test.TestCase):
num_oov_buckets=1,
initializer=self.initializer))
+ # The new weight matrix is of size
+ # [5 feature vocab + 1 feature OOV, 16 (embedding dimension)], where the
+ # last vocab row (2nd last row) is newly initialized (wasn't found in
+ # previous vocab) and the actual last row is OOV and also newly initialized.
+ # Use a partitioned variable to confirm that the offset logic works.
expected_remapped_embeddings = np.concatenate(
[
np.reshape(range(64), [4, 16]),
np.reshape([self.init_val] * 32, [2, 16]),
],
axis=0)
-
- # The new weight matrix is of size
- # [5 feature vocab + 1 feature OOV, 16 (embedding dimension)], where the
- # last vocab row (2nd last row) is newly initialized (wasn't found in
- # previous vocab) and the actual last row is OOV and also newly initialized.
- # Use a partitioned variable to confirm that the offset logic works.
remapped_embeddings = variable_scope.get_variable(
name='embedding/obtained_embedding_matrix',
shape=[6, 16],
@@ -323,6 +320,11 @@ class LoadAndRemapWrappersTest(test.TestCase):
num_oov_buckets=5,
initializer=self.initializer))
+ # The new weight matrix is of size
+ # [4 feature vocab + 5 feature OOV, 16 (embedding dimension)], where the
+ # 3rd and 4th rows are not found in the old vocabulary and therefore newly
+ # initialized. The last five rows are OOV and also newly initialized.
+ # Use a partitioned variable to confirm that the offset logic works.
expected_remapped_embeddings = np.concatenate(
[
np.reshape(range(16, 32), [1, 16]),
@@ -330,15 +332,47 @@ class LoadAndRemapWrappersTest(test.TestCase):
np.reshape([self.init_val] * 112, [7, 16]),
],
axis=0)
+ remapped_embeddings = variable_scope.get_variable(
+ name='embedding/obtained_embedding_matrix',
+ shape=[9, 16],
+ initializer=embedding_loading_initializer,
+ partitioner=partitioned_variables.fixed_size_partitioner(2))
+
+ with self.test_session():
+ variables.global_variables_initializer().run()
+ self.assertAllClose(expected_remapped_embeddings,
+ remapped_embeddings.as_tensor().eval())
+
+ def test_load_embedding_initializer_old_row_vocab(self):
+ """Tests for load_embedding_initializer where we constrain old vocab."""
+ embedding_loading_initializer = (
+ checkpoint_ops._load_embedding_initializer(
+ new_vocab_file=self.new_feature_vocab_file,
+ old_vocab_file=self.old_feature_vocab_file,
+ # Considered old vocabulary becomes ['zero', 'one', 'two']. This
+ # means 'three' in the new vocabulary is newly initialized.
+ old_vocab_size=3,
+ new_vocab_size=5,
+ embedding_dim=16,
+ embedding_tensor_name='some_scope/embeddings',
+ ckpt_path=[self.checkpoint_file],
+ num_oov_buckets=1,
+ initializer=self.initializer))
# The new weight matrix is of size
- # [4 feature vocab + 5 feature OOV, 16 (embedding dimension)], where the
- # 3rd and 4th rows are not found in the old vocabulary and therefore newly
- # initialized. The last five rows are OOV and also newly initialized.
+ # [5 feature vocab + 1 feature OOV, 16 (embedding dimension)], where the
+ # last vocab row (2nd last row) is newly initialized (wasn't found in
+ # previous vocab) and the actual last row is OOV and also newly initialized.
# Use a partitioned variable to confirm that the offset logic works.
+ expected_remapped_embeddings = np.concatenate(
+ [
+ np.reshape(range(48), [3, 16]),
+ np.reshape([self.init_val] * 48, [3, 16]),
+ ],
+ axis=0)
remapped_embeddings = variable_scope.get_variable(
name='embedding/obtained_embedding_matrix',
- shape=[9, 16],
+ shape=[6, 16],
initializer=embedding_loading_initializer,
partitioner=partitioned_variables.fixed_size_partitioner(2))
@@ -347,6 +381,5 @@ class LoadAndRemapWrappersTest(test.TestCase):
self.assertAllClose(expected_remapped_embeddings,
remapped_embeddings.as_tensor().eval())
-
if __name__ == '__main__':
test.main()
diff --git a/tensorflow/python/training/moving_averages.py b/tensorflow/python/training/moving_averages.py
index eb07343850..e34c759e89 100644
--- a/tensorflow/python/training/moving_averages.py
+++ b/tensorflow/python/training/moving_averages.py
@@ -498,8 +498,9 @@ class ExponentialMovingAverage(object):
# Collect all the variables with moving average,
for v in moving_avg_variables:
name_map[self.average_name(v)] = v
- # Make sure we restore variables without moving average as well.
- for v in list(set(variables.global_variables()) - moving_avg_variables):
- if v.op.name not in name_map:
+ # Make sure we restore variables without moving averages as well.
+ moving_avg_variable_names = set([v.name for v in moving_avg_variables])
+ for v in set(variables.global_variables()):
+ if v.name not in moving_avg_variable_names and v.op.name not in name_map:
name_map[v.op.name] = v
return name_map
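To make the intent concrete, a rough sketch of what `variables_to_restore`
should now return when duplicate Variable objects wrap the same underlying
variable (the situation exercised by the test added below):

    import tensorflow as tf
    from tensorflow.python.training import moving_averages

    # Hedged sketch: a single trainable variable and its would-be average.
    v = tf.Variable(10.0, name='v')
    ema = moving_averages.ExponentialMovingAverage(0.25, name='foo_avg')
    vars_to_restore = ema.variables_to_restore()
    # Expected: exactly one entry, {'v/foo_avg': v}; a second Python Variable
    # object referring to the same underlying 'v' must not add an entry.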
diff --git a/tensorflow/python/training/moving_averages_test.py b/tensorflow/python/training/moving_averages_test.py
index 63604cf19d..6efdeb2866 100644
--- a/tensorflow/python/training/moving_averages_test.py
+++ b/tensorflow/python/training/moving_averages_test.py
@@ -27,6 +27,7 @@ from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import moving_averages
+from tensorflow.python.training import saver as saver_lib
class MovingAveragesTest(test.TestCase):
@@ -392,6 +393,32 @@ class ExponentialMovingAverageTest(test.TestCase):
self.assertEqual([b"loc:@v1"], ema.average(v1).op.colocation_groups())
self.assertDeviceEqual("/job:default", ema.average(tensor2).device)
+ def _ExportAndImportGraph(self, graph):
+ """Export and import graph into a new graph."""
+ meta_graph = saver_lib.export_meta_graph(
+ graph=graph, collection_list=graph.get_all_collection_keys())
+ graph_copy = ops.Graph()
+ with graph_copy.as_default():
+ _ = saver_lib.import_meta_graph(meta_graph)
+ return graph_copy
+
+ def testImportedGraphVariablesToRestore(self):
+ g = ops.Graph()
+ with g.as_default():
+ variables.Variable(10.0, name="v")
+ # Export and import the graph into a new graph.
+ g_copy = self._ExportAndImportGraph(g)
+ with g_copy.as_default():
+ ema = moving_averages.ExponentialMovingAverage(0.25, name="foo_avg")
+ vars_to_restore = ema.variables_to_restore()
+ # There should only be one variable in vars_to_restore. This is important
+ # to check because when importing from a GraphDef, TF makes duplicate
+ # python Variable objects referring to the same underlying variable. We
+ # need to be sure that two variables referring to the same variable don't
+ # both get added to vars_to_restore.
+ self.assertEqual(len(vars_to_restore), 1)
+ self.assertTrue("v/foo_avg" in vars_to_restore)
+
if __name__ == "__main__":
test.main()
diff --git a/tensorflow/python/util/tf_should_use.py b/tensorflow/python/util/tf_should_use.py
index 99081cb294..a576547d5f 100644
--- a/tensorflow/python/util/tf_should_use.py
+++ b/tensorflow/python/util/tf_should_use.py
@@ -22,6 +22,7 @@ import types
import six # pylint: disable=unused-import
+from tensorflow.python.eager import context
from tensorflow.python.util import tf_decorator
# pylint: enable=g-bad-import-order,g-import-not-at-top
@@ -31,6 +32,8 @@ from tensorflow.python.util import tf_decorator
def _add_should_use_warning(x, fatal_error=False):
"""Wraps object x so that if it is never used, a warning is logged.
+ Does nothing when executing eagerly.
+
Args:
x: Python object.
fatal_error: Python bool. If `True`, tf.logging.fatal is raised
@@ -44,9 +47,10 @@ def _add_should_use_warning(x, fatal_error=False):
if x is None: # special corner case where x is None
return x
- # TODO(apassos) we don't have an easier way to check because importing context
- # or ops here would create a BUILD dependency cycle.
- if type(x).__name__ == 'EagerTensor':
+ if context.in_eager_mode():
+ # Typically not needed when executing eagerly (the main use case is for ops
+ # which need to be incorporated into the graph), and even the no-op wrapper
+ # creates reference cycles which require garbage collection.
return x
def override_method(method):
@@ -102,6 +106,8 @@ def should_use_result(fn):
- `t != 0`. In this case, comparison is done on types / ids.
- `isinstance(t, tf.Tensor)`. Similar to above.
+ Does nothing when executing eagerly.
+
Args:
fn: The function to wrap.
@@ -136,6 +142,8 @@ def must_use_result_or_fatal(fn):
- `t != 0`. In this case, comparison is done on types / ids.
- `isinstance(t, tf.Tensor)`. Similar to above.
+ Does nothing when executing eagerly.
+
Args:
fn: The function to wrap.
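A brief usage sketch of the decorator these docstrings describe:

    import tensorflow as tf
    from tensorflow.python.util.tf_should_use import should_use_result

    # Hedged sketch: the decorated function's return value must be used
    # (run, captured, or depended on); otherwise a warning is logged in
    # graph mode, while eager mode now returns the value unwrapped.
    @should_use_result
    def make_assert(condition):
      return tf.Assert(condition, ['condition failed'])

    op = make_assert(tf.constant(True))  # keeping `op` avoids the warning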
diff --git a/tensorflow/tensorflow.bzl b/tensorflow/tensorflow.bzl
index 915880a3d0..16c3386e15 100644
--- a/tensorflow/tensorflow.bzl
+++ b/tensorflow/tensorflow.bzl
@@ -672,6 +672,11 @@ def tf_cuda_only_cc_test(name,
}),
tags=tags + tf_cuda_tests_tags())
+register_extension_info(
+ extension_name="tf_cuda_only_cc_test",
+ label_regex_for_dep="{extension_name}_gpu")
+
+
# Create a cc_test for each of the tensorflow tests listed in "tests"
def tf_cc_tests(srcs,
deps,
@@ -746,6 +751,11 @@ def tf_java_test(name,
*args,
**kwargs)
+register_extension_info(
+ extension_name="tf_java_test",
+ label_regex_for_dep="{extension_name}")
+
+
def _cuda_copts():
"""Gets the appropriate set of copts for (maybe) CUDA compilation.
@@ -790,6 +800,10 @@ def tf_gpu_kernel_library(srcs,
alwayslink=1,
**kwargs)
+register_extension_info(
+ extension_name="tf_gpu_kernel_library",
+ label_regex_for_dep="{extension_name}")
+
def tf_cuda_library(deps=None, cuda_deps=None, copts=None, **kwargs):
"""Generate a cc_library with a conditional set of CUDA dependencies.
@@ -937,6 +951,10 @@ def tf_mkl_kernel_library(name,
nocopts=nocopts
))
+register_extension_info(
+ extension_name="tf_mkl_kernel_library",
+ label_regex_for_dep="{extension_name}")
+
# Bazel rules for building swig files.
def _py_wrap_cc_impl(ctx):
@@ -1505,3 +1523,7 @@ def cc_library_with_android_deps(deps,
**kwargs):
deps = if_not_android(deps) + if_android(android_deps) + common_deps
native.cc_library(deps=deps, **kwargs)
+
+register_extension_info(
+ extension_name="cc_library_with_android_deps",
+ label_regex_for_dep="{extension_name}")
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.-model.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.-model.pbtxt
new file mode 100644
index 0000000000..b6f9eea2de
--- /dev/null
+++ b/tensorflow/tools/api/golden/tensorflow.keras.-model.pbtxt
@@ -0,0 +1,269 @@
+path: "tensorflow.keras.Model"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.training.Model\'>"
+ is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.topology.Network\'>"
+ is_instance: "<class \'tensorflow.python.layers.base.Network\'>"
+ is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.topology.Layer\'>"
+ is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_spec"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "scope_name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "state_updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "stateful"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "uses_learning_phase"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'inputs\', \'outputs\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compile"
+ argspec: "args=[\'self\', \'optimizer\', \'loss\', \'metrics\', \'loss_weights\', \'sample_weight_mode\', \'weighted_metrics\', \'target_tensors\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "evaluate"
+ argspec: "args=[\'self\', \'x\', \'y\', \'batch_size\', \'verbose\', \'sample_weight\', \'steps\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "evaluate_generator"
+ argspec: "args=[\'self\', \'generator\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\'], varargs=None, keywords=kwargs, defaults=[\'10\', \'1\', \'False\'], "
+ }
+ member_method {
+ name: "fit"
+ argspec: "args=[\'self\', \'x\', \'y\', \'batch_size\', \'epochs\', \'verbose\', \'callbacks\', \'validation_split\', \'validation_data\', \'shuffle\', \'class_weight\', \'sample_weight\', \'initial_epoch\', \'steps_per_epoch\', \'validation_steps\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'1\', \'1\', \'None\', \'0.0\', \'None\', \'True\', \'None\', \'None\', \'0\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "fit_generator"
+ argspec: "args=[\'self\', \'generator\', \'steps_per_epoch\', \'epochs\', \'verbose\', \'callbacks\', \'validation_data\', \'validation_steps\', \'class_weight\', \'max_queue_size\', \'workers\', \'use_multiprocessing\', \'shuffle\', \'initial_epoch\'], varargs=None, keywords=kwargs, defaults=[\'1\', \'1\', \'None\', \'None\', \'None\', \'None\', \'10\', \'1\', \'False\', \'True\', \'0\'], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\', \'custom_objects\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_layer"
+ argspec: "args=[\'self\', \'name\', \'index\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "load_weights"
+ argspec: "args=[\'self\', \'filepath\', \'by_name\'], varargs=None, keywords=None, defaults=[\'False\'], "
+ }
+ member_method {
+ name: "predict"
+ argspec: "args=[\'self\', \'x\', \'batch_size\', \'verbose\', \'steps\'], varargs=None, keywords=None, defaults=[\'None\', \'0\', \'None\'], "
+ }
+ member_method {
+ name: "predict_generator"
+ argspec: "args=[\'self\', \'generator\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\', \'verbose\'], varargs=None, keywords=kwargs, defaults=[\'10\', \'1\', \'False\', \'0\'], "
+ }
+ member_method {
+ name: "predict_on_batch"
+ argspec: "args=[\'self\', \'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "reset_states"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "save"
+ argspec: "args=[\'self\', \'filepath\', \'overwrite\', \'include_optimizer\'], varargs=None, keywords=None, defaults=[\'True\', \'True\'], "
+ }
+ member_method {
+ name: "save_weights"
+ argspec: "args=[\'self\', \'filepath\', \'overwrite\'], varargs=None, keywords=None, defaults=[\'True\'], "
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "summary"
+ argspec: "args=[\'self\', \'line_length\', \'positions\', \'print_fn\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "test_on_batch"
+ argspec: "args=[\'self\', \'x\', \'y\', \'sample_weight\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "to_json"
+ argspec: "args=[\'self\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "to_yaml"
+ argspec: "args=[\'self\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "train_on_batch"
+ argspec: "args=[\'self\', \'x\', \'y\', \'sample_weight\', \'class_weight\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.-sequential.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.-sequential.pbtxt
new file mode 100644
index 0000000000..5076434dbb
--- /dev/null
+++ b/tensorflow/tools/api/golden/tensorflow.keras.-sequential.pbtxt
@@ -0,0 +1,294 @@
+path: "tensorflow.keras.Sequential"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras._impl.keras.models.Sequential\'>"
+ is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.training.Model\'>"
+ is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.topology.Network\'>"
+ is_instance: "<class \'tensorflow.python.layers.base.Network\'>"
+ is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.topology.Layer\'>"
+ is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_spec"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "regularizers"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "scope_name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "state_updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "stateful"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "uses_learning_phase"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'layers\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "add"
+ argspec: "args=[\'self\', \'layer\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compile"
+ argspec: "args=[\'self\', \'optimizer\', \'loss\', \'metrics\', \'sample_weight_mode\', \'weighted_metrics\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "evaluate"
+ argspec: "args=[\'self\', \'x\', \'y\', \'batch_size\', \'verbose\', \'sample_weight\'], varargs=None, keywords=None, defaults=[\'32\', \'1\', \'None\'], "
+ }
+ member_method {
+ name: "evaluate_generator"
+ argspec: "args=[\'self\', \'generator\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\'], varargs=None, keywords=kwargs, defaults=[\'10\', \'1\', \'False\'], "
+ }
+ member_method {
+ name: "fit"
+ argspec: "args=[\'self\', \'x\', \'y\', \'batch_size\', \'epochs\', \'verbose\', \'callbacks\', \'validation_split\', \'validation_data\', \'shuffle\', \'class_weight\', \'sample_weight\', \'initial_epoch\'], varargs=None, keywords=None, defaults=[\'32\', \'10\', \'1\', \'None\', \'0.0\', \'None\', \'True\', \'None\', \'None\', \'0\'], "
+ }
+ member_method {
+ name: "fit_generator"
+ argspec: "args=[\'self\', \'generator\', \'steps_per_epoch\', \'epochs\', \'verbose\', \'callbacks\', \'validation_data\', \'validation_steps\', \'class_weight\', \'max_queue_size\', \'workers\', \'use_multiprocessing\', \'initial_epoch\'], varargs=None, keywords=kwargs, defaults=[\'1\', \'1\', \'None\', \'None\', \'None\', \'None\', \'10\', \'1\', \'False\', \'0\'], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\', \'custom_objects\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_layer"
+ argspec: "args=[\'self\', \'name\', \'index\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "load_weights"
+ argspec: "args=[\'self\', \'filepath\', \'by_name\'], varargs=None, keywords=None, defaults=[\'False\'], "
+ }
+ member_method {
+ name: "pop"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "predict"
+ argspec: "args=[\'self\', \'x\', \'batch_size\', \'verbose\'], varargs=None, keywords=None, defaults=[\'32\', \'0\'], "
+ }
+ member_method {
+ name: "predict_classes"
+ argspec: "args=[\'self\', \'x\', \'batch_size\', \'verbose\'], varargs=None, keywords=None, defaults=[\'32\', \'1\'], "
+ }
+ member_method {
+ name: "predict_generator"
+ argspec: "args=[\'self\', \'generator\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\', \'verbose\'], varargs=None, keywords=kwargs, defaults=[\'10\', \'1\', \'False\', \'0\'], "
+ }
+ member_method {
+ name: "predict_on_batch"
+ argspec: "args=[\'self\', \'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "predict_proba"
+ argspec: "args=[\'self\', \'x\', \'batch_size\', \'verbose\'], varargs=None, keywords=None, defaults=[\'32\', \'1\'], "
+ }
+ member_method {
+ name: "reset_states"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "save"
+ argspec: "args=[\'self\', \'filepath\', \'overwrite\', \'include_optimizer\'], varargs=None, keywords=None, defaults=[\'True\', \'True\'], "
+ }
+ member_method {
+ name: "save_weights"
+ argspec: "args=[\'self\', \'filepath\', \'overwrite\'], varargs=None, keywords=None, defaults=[\'True\'], "
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "summary"
+ argspec: "args=[\'self\', \'line_length\', \'positions\', \'print_fn\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "test_on_batch"
+ argspec: "args=[\'self\', \'x\', \'y\', \'sample_weight\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "to_json"
+ argspec: "args=[\'self\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "to_yaml"
+ argspec: "args=[\'self\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "train_on_batch"
+ argspec: "args=[\'self\', \'x\', \'y\', \'class_weight\', \'sample_weight\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.applications.inception_resnet_v2.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.applications.inception_resnet_v2.pbtxt
new file mode 100644
index 0000000000..211080c19b
--- /dev/null
+++ b/tensorflow/tools/api/golden/tensorflow.keras.applications.inception_resnet_v2.pbtxt
@@ -0,0 +1,15 @@
+path: "tensorflow.keras.applications.inception_resnet_v2"
+tf_module {
+ member_method {
+ name: "InceptionResNetV2"
+ argspec: "args=[\'include_top\', \'weights\', \'input_tensor\', \'input_shape\', \'pooling\', \'classes\'], varargs=None, keywords=None, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\'], "
+ }
+ member_method {
+ name: "decode_predictions"
+ argspec: "args=[\'preds\', \'top\'], varargs=None, keywords=None, defaults=[\'5\'], "
+ }
+ member_method {
+ name: "preprocess_input"
+ argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
+ }
+}
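This golden records the newly exported Keras application; a minimal
construction sketch matching the argspec above (weights=None avoids the
pretrained-weights download):

    import tensorflow as tf

    # Hedged sketch, matching the recorded defaults.
    model = tf.keras.applications.inception_resnet_v2.InceptionResNetV2(
        include_top=True, weights=None, classes=1000)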
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.applications.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.applications.pbtxt
index f50dc7d7fe..daeb5aad41 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.applications.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.applications.pbtxt
@@ -1,6 +1,10 @@
path: "tensorflow.keras.applications"
tf_module {
member {
+ name: "inception_resnet_v2"
+ mtype: "<type \'module\'>"
+ }
+ member {
name: "inception_v3"
mtype: "<type \'module\'>"
}
@@ -25,6 +29,10 @@ tf_module {
mtype: "<type \'module\'>"
}
member_method {
+ name: "InceptionResNetV2"
+ argspec: "args=[\'include_top\', \'weights\', \'input_tensor\', \'input_shape\', \'pooling\', \'classes\'], varargs=None, keywords=None, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\'], "
+ }
+ member_method {
name: "InceptionV3"
argspec: "args=[\'include_top\', \'weights\', \'input_tensor\', \'input_shape\', \'pooling\', \'classes\'], varargs=None, keywords=None, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\'], "
}
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.applications.resnet50.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.applications.resnet50.pbtxt
index 57c48df2e3..7385af064d 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.applications.resnet50.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.applications.resnet50.pbtxt
@@ -10,6 +10,6 @@ tf_module {
}
member_method {
name: "preprocess_input"
- argspec: "args=[\'x\', \'data_format\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ argspec: "args=[\'x\', \'data_format\', \'mode\'], varargs=None, keywords=None, defaults=[\'None\', \'caffe\'], "
}
}
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.applications.vgg16.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.applications.vgg16.pbtxt
index 29d45daea4..ba66fba8f3 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.applications.vgg16.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.applications.vgg16.pbtxt
@@ -10,6 +10,6 @@ tf_module {
}
member_method {
name: "preprocess_input"
- argspec: "args=[\'x\', \'data_format\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ argspec: "args=[\'x\', \'data_format\', \'mode\'], varargs=None, keywords=None, defaults=[\'None\', \'caffe\'], "
}
}
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.applications.vgg19.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.applications.vgg19.pbtxt
index 124aa7e5e5..e55a1345b6 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.applications.vgg19.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.applications.vgg19.pbtxt
@@ -10,6 +10,6 @@ tf_module {
}
member_method {
name: "preprocess_input"
- argspec: "args=[\'x\', \'data_format\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ argspec: "args=[\'x\', \'data_format\', \'mode\'], varargs=None, keywords=None, defaults=[\'None\', \'caffe\'], "
}
}
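These three goldens record the new `mode` argument on `preprocess_input`,
defaulting to 'caffe'. A hedged call sketch, with the input batch being a
hypothetical stand-in:

    import numpy as np
    import tensorflow as tf

    # Hedged sketch: 'caffe' is the recorded default preprocessing mode.
    images = np.zeros((1, 224, 224, 3), dtype=np.float32)  # hypothetical
    x = tf.keras.applications.vgg16.preprocess_input(images, mode='caffe')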
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.pbtxt
index 77cfe33ac4..754b3b84b0 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.pbtxt
@@ -1,6 +1,14 @@
path: "tensorflow.keras"
tf_module {
member {
+ name: "Model"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Sequential"
+ mtype: "<type \'type\'>"
+ }
+ member {
name: "activations"
mtype: "<type \'module\'>"
}
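With Model and Sequential now exported at the top level of tf.keras, a
minimal smoke-test sketch of the new entry points:

    import tensorflow as tf

    # Hedged sketch: both classes are reachable directly under tf.keras.
    model = tf.keras.Sequential(
        [tf.keras.layers.Dense(4, input_shape=(8,))])
    model.compile(optimizer='sgd', loss='mse')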
diff --git a/tensorflow/tools/api/golden/tensorflow.linalg.pbtxt b/tensorflow/tools/api/golden/tensorflow.linalg.pbtxt
index 0d62585ff4..9fd38a29b7 100644
--- a/tensorflow/tools/api/golden/tensorflow.linalg.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.linalg.pbtxt
@@ -73,6 +73,10 @@ tf_module {
argspec: "args=[\'equation\'], varargs=inputs, keywords=kwargs, defaults=None"
}
member_method {
+ name: "expm"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
name: "eye"
argspec: "args=[\'num_rows\', \'num_columns\', \'batch_shape\', \'dtype\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \"<dtype: \'float32\'>\", \'None\'], "
}
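The linalg golden gains `expm`, backed by the Eigen MatrixFunctions
dependency added at the end of this diff. A small worked sketch:

    import tensorflow as tf

    # For a nilpotent N (N @ N == 0), expm(N) = I + N, so this evaluates to
    # [[1., 1.], [0., 1.]].
    n = tf.constant([[0.0, 1.0], [0.0, 0.0]])
    e = tf.linalg.expm(n)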
diff --git a/tensorflow/tools/api/tests/api_compatibility_test.py b/tensorflow/tools/api/tests/api_compatibility_test.py
index 6a27f6bc42..a8fdf4c9a0 100644
--- a/tensorflow/tools/api/tests/api_compatibility_test.py
+++ b/tensorflow/tools/api/tests/api_compatibility_test.py
@@ -29,7 +29,6 @@ from __future__ import print_function
import argparse
from collections import defaultdict
-from operator import attrgetter
import os
import re
import subprocess
@@ -68,7 +67,6 @@ _API_GOLDEN_FOLDER = 'tensorflow/tools/api/golden'
_TEST_README_FILE = 'tensorflow/tools/api/tests/README.txt'
_UPDATE_WARNING_FILE = 'tensorflow/tools/api/tests/API_UPDATE_WARNING.txt'
-_ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
_CONVERT_FROM_MULTILINE_SCRIPT = 'tensorflow/tools/api/tests/convert_from_multiline'
_BASE_API_DIR = 'tensorflow/core/api_def/base_api'
_PYTHON_API_DIR = 'tensorflow/core/api_def/python_api'
@@ -137,6 +135,16 @@ def _GetHiddenOps():
return hidden_ops
+def _GetGoldenApiDefs():
+ old_api_def_files = file_io.get_matching_files(_GetApiDefFilePath('*'))
+ return {file_path: file_io.read_file_to_string(file_path)
+ for file_path in old_api_def_files}
+
+
+def _GetApiDefFilePath(graph_op_name):
+ return os.path.join(_PYTHON_API_DIR, 'api_def_%s.pbtxt' % graph_op_name)
+
+
class ApiCompatibilityTest(test.TestCase):
def __init__(self, *args, **kwargs):
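The two helpers above move the golden bookkeeping from one file per leading
letter to one file per op. Illustratively (derived directly from the
definitions above, with _PYTHON_API_DIR as defined earlier in this file):

    # _GetApiDefFilePath('MatMul')
    # -> 'tensorflow/core/api_def/python_api/api_def_MatMul.pbtxt'
    # _GetGoldenApiDefs() then maps each such path to its current contents.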
@@ -302,6 +310,14 @@ class ApiDefTest(test.TestCase):
endpoints in base_api_def. Otherwise, returns None.
"""
endpoint_names_set = set(endpoint_names)
+
+ # If the only endpoint is equal to graph_op_name then
+ # it is equivalent to having no endpoints.
+ if (not base_api_def.endpoint and len(endpoint_names) == 1
+ and endpoint_names[0] ==
+ self._GenerateLowerCaseOpName(base_api_def.graph_op_name)):
+ return None
+
base_endpoint_names_set = {
self._GenerateLowerCaseOpName(endpoint.name)
for endpoint in base_api_def.endpoint}
@@ -349,8 +365,8 @@ class ApiDefTest(test.TestCase):
Args:
name_to_base_api_def: Map from op name to base api_def_pb2.ApiDef.
- api_def_map: Map from first op name character (in caps) to
- api_def_pb2.ApiDefs for Python API overrides.
+ api_def_map: Map from file path to api_def_pb2.ApiDefs for Python API
+ overrides.
"""
hidden_ops = _GetHiddenOps()
for hidden_op in hidden_ops:
@@ -363,7 +379,9 @@ class ApiDefTest(test.TestCase):
api_def = api_def_pb2.ApiDef()
api_def.graph_op_name = base_api_def.graph_op_name
api_def.visibility = api_def_pb2.ApiDef.HIDDEN
- api_def_map[api_def.graph_op_name[0].upper()].op.extend([api_def])
+
+ file_path = _GetApiDefFilePath(base_api_def.graph_op_name)
+ api_def_map[file_path].op.extend([api_def])
@unittest.skipUnless(
sys.version_info.major == 2 and os.uname()[0] == 'Linux',
@@ -381,8 +399,8 @@ class ApiDefTest(test.TestCase):
traverse.traverse(tf, public_api_visitor)
proto_dict = visitor.GetProtos()
- # Map from first character of op name to Python ApiDefs.
- api_def_map = defaultdict(api_def_pb2.ApiDefs)
+ # Map from file path to Python ApiDefs.
+ new_api_defs_map = defaultdict(api_def_pb2.ApiDefs)
# We need to override all endpoints even if 1 endpoint differs from base
# ApiDef. So, we first create a map from an op to all its endpoints.
op_to_endpoint_name = defaultdict(list)
@@ -410,43 +428,45 @@ class ApiDefTest(test.TestCase):
graph_op_name = snake_to_camel_graph_op_names[op.__name__]
api_def = self._CreatePythonApiDef(
name_to_base_api_def[graph_op_name], endpoint_names)
+
if api_def:
- api_defs = api_def_map[graph_op_name[0].upper()]
+ file_path = _GetApiDefFilePath(graph_op_name)
+ api_defs = new_api_defs_map[file_path]
api_defs.op.extend([api_def])
- self._AddHiddenOpOverrides(name_to_base_api_def, api_def_map)
+ self._AddHiddenOpOverrides(name_to_base_api_def, new_api_defs_map)
- for key in _ALPHABET:
- # Get new ApiDef for the given key.
- new_api_defs_str = ''
- if key in api_def_map:
- new_api_defs = api_def_map[key]
- new_api_defs.op.sort(key=attrgetter('graph_op_name'))
- new_api_defs_str = str(new_api_defs)
+ old_api_defs_map = _GetGoldenApiDefs()
+ for file_path, new_api_defs in new_api_defs_map.items():
+ # Get new ApiDef string.
+ new_api_defs_str = str(new_api_defs)
- # Get current ApiDef for the given key.
- api_defs_file_path = os.path.join(
- _PYTHON_API_DIR, 'api_def_%s.pbtxt' % key)
- old_api_defs_str = ''
- if file_io.file_exists(api_defs_file_path):
- old_api_defs_str = file_io.read_file_to_string(api_defs_file_path)
+ # Get current ApiDef for the given file.
+ old_api_defs_str = (
+ old_api_defs_map[file_path] if file_path in old_api_defs_map else '')
if old_api_defs_str == new_api_defs_str:
continue
if FLAGS.update_goldens:
- if not new_api_defs_str:
- logging.info('Deleting %s...' % api_defs_file_path)
- file_io.delete_file(api_defs_file_path)
- else:
- logging.info('Updating %s...' % api_defs_file_path)
- file_io.write_string_to_file(api_defs_file_path, new_api_defs_str)
+ logging.info('Updating %s...' % file_path)
+ file_io.write_string_to_file(file_path, new_api_defs_str)
else:
self.assertMultiLineEqual(
old_api_defs_str, new_api_defs_str,
'To update golden API files, run api_compatibility_test locally '
'with --update_goldens=True flag.')
+ for file_path in set(old_api_defs_map) - set(new_api_defs_map):
+ if FLAGS.update_goldens:
+ logging.info('Deleting %s...' % file_path)
+ file_io.delete_file(file_path)
+ else:
+ self.fail(
'%s file is no longer needed and should be removed. '
+ 'To update golden API files, run api_compatibility_test locally '
+ 'with --update_goldens=True flag.' % file_path)
+
if __name__ == '__main__':
parser = argparse.ArgumentParser()
diff --git a/tensorflow/tools/ci_build/install/install_pip_packages.sh b/tensorflow/tools/ci_build/install/install_pip_packages.sh
index 352af87108..b8ed1ab767 100755
--- a/tensorflow/tools/ci_build/install/install_pip_packages.sh
+++ b/tensorflow/tools/ci_build/install/install_pip_packages.sh
@@ -31,6 +31,10 @@ pip3 install wheel
pip2 install --upgrade six==1.10.0
pip3 install --upgrade six==1.10.0
+# Install absl-py.
+pip2 install --upgrade absl-py
+pip3 install --upgrade absl-py
+
# Install werkzeug.
pip2 install --upgrade werkzeug==0.11.10
pip3 install --upgrade werkzeug==0.11.10
diff --git a/tensorflow/tools/ci_build/install/install_python3.5_pip_packages.sh b/tensorflow/tools/ci_build/install/install_python3.5_pip_packages.sh
index e452c50221..81bce95d54 100755
--- a/tensorflow/tools/ci_build/install/install_python3.5_pip_packages.sh
+++ b/tensorflow/tools/ci_build/install/install_python3.5_pip_packages.sh
@@ -61,6 +61,7 @@ fi
set -e
# Install six.
+pip3.5 install --upgrade absl-py
pip3.5 install --upgrade six==1.10.0
# Install protobuf.
diff --git a/tensorflow/tools/ci_build/windows/cpu/cmake/run_py.bat b/tensorflow/tools/ci_build/windows/cpu/cmake/run_py.bat
index 2f6d53e171..3c3b223a00 100644
--- a/tensorflow/tools/ci_build/windows/cpu/cmake/run_py.bat
+++ b/tensorflow/tools/ci_build/windows/cpu/cmake/run_py.bat
@@ -37,6 +37,9 @@ DIR %REPO_ROOT%\%BUILD_DIR%\tf_python\dist\ /S /B > wheel_filename_file
set /p WHEEL_FILENAME=<wheel_filename_file
del wheel_filename_file
+:: Install absl-py.
+%PIP_EXE% install --upgrade absl-py
+
:: Install the pip package.
echo Installing PIP package...
%PIP_EXE% install --upgrade --no-deps %WHEEL_FILENAME% -v -v
diff --git a/tensorflow/tools/ci_build/windows/gpu/cmake/run_py.bat b/tensorflow/tools/ci_build/windows/gpu/cmake/run_py.bat
index 02e24c85de..b537192a94 100644
--- a/tensorflow/tools/ci_build/windows/gpu/cmake/run_py.bat
+++ b/tensorflow/tools/ci_build/windows/gpu/cmake/run_py.bat
@@ -37,6 +37,9 @@ DIR %REPO_ROOT%\%BUILD_DIR%\tf_python\dist\ /S /B > wheel_filename_file
set /p WHEEL_FILENAME=<wheel_filename_file
del wheel_filename_file
+:: Install absl-py.
+%PIP_EXE% install --upgrade absl-py
+
:: Install the pip package.
echo Installing PIP package...
%PIP_EXE% install --upgrade --no-deps %WHEEL_FILENAME% -v -v
diff --git a/tensorflow/tools/docs/generate_lib.py b/tensorflow/tools/docs/generate_lib.py
index 9b8b50f9cd..c0cde1d3bd 100644
--- a/tensorflow/tools/docs/generate_lib.py
+++ b/tensorflow/tools/docs/generate_lib.py
@@ -152,19 +152,36 @@ def write_docs(output_dir, parser_config, yaml_toc, root_title='TensorFlow'):
# Generate header
f.write('# Automatically generated file; please do not edit\ntoc:\n')
for module in modules:
- f.write(' - title: ' + module + '\n'
- ' section:\n' + ' - title: Overview\n' +
- ' path: /TARGET_DOC_ROOT/VERSION/' + symbol_to_file[module]
- + '\n')
+ indent_num = module.count('.')
+ # Don't list `tf.submodule` inside `tf`
+ indent_num = max(indent_num, 1)
+ indent = ' '*indent_num
+
+ if indent_num > 1:
+ # tf.contrib.bayesflow.entropy will be under
+ # tf.contrib->bayesflow->entropy
+ title = module.split('.')[-1]
+ else:
+ title = module
+
+ header = [
+ '- title: ' + title,
+ ' section:',
+ ' - title: Overview',
+ ' path: /TARGET_DOC_ROOT/VERSION/' + symbol_to_file[module]]
+ header = ''.join([indent+line+'\n' for line in header])
+ f.write(header)
symbols_in_module = module_children.get(module, [])
# Sort case-insensitive, if equal sort case sensitive (upper first)
symbols_in_module.sort(key=lambda a: (a.upper(), a))
for full_name in symbols_in_module:
- f.write(' - title: ' + full_name[len(module) + 1:] + '\n'
- ' path: /TARGET_DOC_ROOT/VERSION/' +
- symbol_to_file[full_name] + '\n')
+ item = [
+ ' - title: ' + full_name[len(module) + 1:],
+ ' path: /TARGET_DOC_ROOT/VERSION/' + symbol_to_file[full_name]]
+ item = ''.join([indent+line+'\n' for line in item])
+ f.write(item)
# Write a global index containing all full names with links.
with open(os.path.join(output_dir, 'index.md'), 'w') as f:
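To see the nesting rule in isolation, a tiny sketch of the indentation and
title logic introduced above (module names illustrative):

    # Hedged sketch of the TOC depth computation.
    for module in ('tf', 'tf.contrib', 'tf.contrib.bayesflow.entropy'):
      indent_num = max(module.count('.'), 1)   # never nest below level 1
      title = module.split('.')[-1] if indent_num > 1 else module
      print(indent_num, title)
    # 1 tf
    # 1 tf.contrib                 (one dot: stays at level 1, full name)
    # 3 entropy                    (nested under tf.contrib -> bayesflow)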
diff --git a/tensorflow/tools/pip_package/setup.py b/tensorflow/tools/pip_package/setup.py
index 071b3a2a18..456c2e2908 100644
--- a/tensorflow/tools/pip_package/setup.py
+++ b/tensorflow/tools/pip_package/setup.py
@@ -32,6 +32,7 @@ from setuptools.dist import Distribution
_VERSION = '1.4.0-rc1'
REQUIRED_PACKAGES = [
+ 'absl-py',
'enum34 >= 1.1.6',
'numpy >= 1.12.1',
'six >= 1.10.0',
diff --git a/tensorflow/workspace.bzl b/tensorflow/workspace.bzl
index 0173f5a0d4..8ddfb1525a 100644
--- a/tensorflow/workspace.bzl
+++ b/tensorflow/workspace.bzl
@@ -354,6 +354,15 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
build_file = str(Label("//third_party:six.BUILD")),
)
+ native.http_archive(
+ name = "absl_py",
+ urls = [
+ "https://github.com/abseil/abseil-py/archive/231e3870b976c1dc61dce1749138661d21556028.tar.gz",
+ ],
+ sha256 = "8ea2b23bfdb9ae7622f3e5d95236bc600c8d8509a2f38c84732b3145585d4f73",
+ strip_prefix = "abseil-py-231e3870b976c1dc61dce1749138661d21556028",
+ )
+
native.new_http_archive(
name = "org_python_pypi_backports_weakref",
urls = [
@@ -439,11 +448,11 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
native.http_archive(
name = "nsync",
urls = [
- "https://mirror.bazel.build/github.com/google/nsync/archive/839fcc53ff9be58218ed55397deb3f8376a1444e.tar.gz",
- # "https://github.com/google/nsync/archive/839fcc53ff9be58218ed55397deb3f8376a1444e.tar.gz",
+ "https://mirror.bazel.build/github.com/google/nsync/archive/4fc8ff3e7626c5f24bc9674438d8257f0ffc226c.tar.gz",
+ # "https://github.com/google/nsync/archive/4fc8ff3e7626c5f24bc9674438d8257f0ffc226c.tar.gz",
],
- sha256 = "124d105edb0313ef2d7f5bb86ec94d9f8de95479e55641c4254ffa8f795e9b37",
- strip_prefix = "nsync-839fcc53ff9be58218ed55397deb3f8376a1444e",
+ sha256 = "ffbbe828f3d0bef75462e34801de5cea31d10aa63eaa42a4ed74c46521bdfd58",
+ strip_prefix = "nsync-4fc8ff3e7626c5f24bc9674438d8257f0ffc226c",
)
native.http_archive(
diff --git a/third_party/eigen.BUILD b/third_party/eigen.BUILD
index dc6de7bbda..07bb6645eb 100644
--- a/third_party/eigen.BUILD
+++ b/third_party/eigen.BUILD
@@ -27,7 +27,6 @@ EIGEN_RESTRICTED_DEPS = [
"Eigen/SparseLU",
]
-# Note: unsupported/Eigen is unsupported and might go away at any time.
EIGEN_FILES = [
"Eigen/**",
"unsupported/Eigen/CXX11/**",
@@ -37,6 +36,7 @@ EIGEN_FILES = [
"unsupported/Eigen/src/KroneckerProduct/**",
"unsupported/Eigen/MatrixFunctions",
"unsupported/Eigen/SpecialFunctions",
+ "unsupported/Eigen/src/MatrixFunctions/**",
"unsupported/Eigen/src/SpecialFunctions/**",
]
diff --git a/third_party/eigen3/BUILD b/third_party/eigen3/BUILD
index ad87477b7a..f5f3418527 100644
--- a/third_party/eigen3/BUILD
+++ b/third_party/eigen3/BUILD
@@ -26,6 +26,7 @@ cc_library(
"Eigen/Eigenvalues",
"Eigen/QR",
"Eigen/SVD",
+ "unsupported/Eigen/MatrixFunctions",
"unsupported/Eigen/SpecialFunctions",
"unsupported/Eigen/CXX11/ThreadPool",
"unsupported/Eigen/CXX11/Tensor",
diff --git a/third_party/eigen3/unsupported/Eigen/MatrixFunctions b/third_party/eigen3/unsupported/Eigen/MatrixFunctions
new file mode 100644
index 0000000000..314b325f8c
--- /dev/null
+++ b/third_party/eigen3/unsupported/Eigen/MatrixFunctions
@@ -0,0 +1 @@
+#include "unsupported/Eigen/MatrixFunctions"