author    Piotr Padlewski <prazek@google.com>             2018-08-09 16:24:38 -0700
committer TensorFlower Gardener <gardener@tensorflow.org> 2018-08-09 16:29:06 -0700
commit    c38bf1ca5051b15356fe4219f36f7f44829fb2ae (patch)
tree      67184c8c1f7cf0e2098371f658f830d8ad1410f9
parent    b306f5f9458feddbdb89b7db557cb74dc9408d07 (diff)
Fix emplace_back(new) calls
Doing v.emplace_back(new Type) on a std::vector<std::unique_ptr<Type>> is not exception safe: it leaks the passed pointer if emplace_back throws an exception before the emplacement happens (e.g. when there is not enough memory to add a new element).

PiperOrigin-RevId: 208130121
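For context, a minimal sketch of the pattern being replaced (illustrative only; the Widget type and the two helper functions below are placeholders, not part of the TensorFlow codebase):

    #include <memory>
    #include <vector>

    struct Widget {
      explicit Widget(int id) : id_(id) {}
      int id_;
    };

    // Unsafe: the raw Widget is allocated first; if emplace_back then throws
    // (e.g. std::bad_alloc while growing the vector), nothing owns the new
    // object yet and it leaks.
    void AddUnsafe(std::vector<std::unique_ptr<Widget>>& v) {
      v.emplace_back(new Widget(42));
    }

    // Safe: std::make_unique (tensorflow::MakeUnique in this patch) takes
    // ownership of the allocation before push_back runs, so the Widget is
    // destroyed even if push_back throws.
    void AddSafe(std::vector<std::unique_ptr<Widget>>& v) {
      v.push_back(std::make_unique<Widget>(42));
    }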
-rw-r--r--  tensorflow/compiler/xla/service/BUILD                  |   1
-rw-r--r--  tensorflow/compiler/xla/service/service.cc             |   7
-rw-r--r--  tensorflow/contrib/lite/toco/tflite/BUILD              |   1
-rw-r--r--  tensorflow/contrib/lite/toco/tflite/operator.cc        | 275
-rw-r--r--  tensorflow/core/grappler/optimizers/meta_optimizer.cc  |  47
-rw-r--r--  tensorflow/core/kernels/data/shuffle_dataset_op.cc     |   7
-rw-r--r--  tensorflow/core/kernels/tensor_array_ops.cc            |   9
7 files changed, 183 insertions(+), 164 deletions(-)
diff --git a/tensorflow/compiler/xla/service/BUILD b/tensorflow/compiler/xla/service/BUILD
index 1b93d72a3e..3480ec4038 100644
--- a/tensorflow/compiler/xla/service/BUILD
+++ b/tensorflow/compiler/xla/service/BUILD
@@ -613,6 +613,7 @@ cc_library(
"//tensorflow/compiler/xla:xla_proto",
"//tensorflow/compiler/xla/legacy_flags:debug_options_flags",
"//tensorflow/core:lib",
+ "//tensorflow/core:ptr_util",
"//tensorflow/core:stream_executor_no_cuda",
],
alwayslink = 1,
diff --git a/tensorflow/compiler/xla/service/service.cc b/tensorflow/compiler/xla/service/service.cc
index 433560e322..1dbf540d13 100644
--- a/tensorflow/compiler/xla/service/service.cc
+++ b/tensorflow/compiler/xla/service/service.cc
@@ -53,6 +53,7 @@ limitations under the License.
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"
#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/ptr_util.h"
using ::tensorflow::strings::Printf;
using ::tensorflow::strings::StrCat;
@@ -408,7 +409,7 @@ Service::ExecuteParallelAndRegisterResult(
streams.push_back(std::move(stream));
if (replica == 0 && profile != nullptr) {
- timers.emplace_back(new se::Timer(streams.back()->parent()));
+ timers.push_back(MakeUnique<se::Timer>(streams.back()->parent()));
streams.back()
->InitTimer(timers.back().get())
.ThenStartTimer(timers.back().get());
@@ -440,7 +441,7 @@ Service::ExecuteParallelAndRegisterResult(
streams.back()->ThenStopTimer(timers.back().get());
}
- result_buffers.emplace_back(std::move(result));
+ result_buffers.push_back(std::move(result));
}
TF_ASSIGN_OR_RETURN(GlobalDataHandle handle,
allocation_tracker_.RegisterReplicatedBuffers(
@@ -558,7 +559,7 @@ StatusOr<GlobalDataHandle> Service::ExecuteAndRegisterResult(
std::vector<tensorflow::gtl::ArraySlice<const ShapedBuffer*>>
replicated_arguments;
for (const auto& arg : arguments) {
- replicated_arguments.emplace_back(arg);
+ replicated_arguments.push_back(arg);
}
TF_ASSIGN_OR_RETURN(auto results, executable->ExecuteOnStreams(
diff --git a/tensorflow/contrib/lite/toco/tflite/BUILD b/tensorflow/contrib/lite/toco/tflite/BUILD
index 83e977d7b3..709c53606b 100644
--- a/tensorflow/contrib/lite/toco/tflite/BUILD
+++ b/tensorflow/contrib/lite/toco/tflite/BUILD
@@ -27,6 +27,7 @@ cc_library(
"//tensorflow/contrib/lite/toco:graph_transformations",
"//tensorflow/contrib/lite/toco:model",
"//tensorflow/core:protos_all_cc",
+ "//tensorflow/core:ptr_util",
"@com_google_absl//absl/memory",
"@flatbuffers",
],
diff --git a/tensorflow/contrib/lite/toco/tflite/operator.cc b/tensorflow/contrib/lite/toco/tflite/operator.cc
index 9ff89e9a65..75808f2b69 100644
--- a/tensorflow/contrib/lite/toco/tflite/operator.cc
+++ b/tensorflow/contrib/lite/toco/tflite/operator.cc
@@ -21,9 +21,9 @@ limitations under the License.
#include "tensorflow/contrib/lite/toco/tflite/custom_operator.h"
#include "tensorflow/contrib/lite/toco/tflite/simple_operator.h"
#include "tensorflow/contrib/lite/toco/tflite/types.h"
-
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
+#include "tensorflow/core/util/ptr_util.h"
namespace toco {
@@ -1235,162 +1235,175 @@ namespace {
// Build a vector containing all the known operators.
std::vector<std::unique_ptr<BaseOperator>> BuildOperatorList() {
std::vector<std::unique_ptr<BaseOperator>> ops;
-
+ using tensorflow::MakeUnique;
// Builtin Operators.
- ops.emplace_back(new Add(::tflite::BuiltinOperator_ADD, OperatorType::kAdd));
- ops.emplace_back(new Div(::tflite::BuiltinOperator_DIV, OperatorType::kDiv));
- ops.emplace_back(new Sub(::tflite::BuiltinOperator_SUB, OperatorType::kSub));
- ops.emplace_back(new AveragePool(::tflite::BuiltinOperator_AVERAGE_POOL_2D,
- OperatorType::kAveragePool));
- ops.emplace_back(
- new SpaceToBatchND(::tflite::BuiltinOperator_SPACE_TO_BATCH_ND,
- OperatorType::kSpaceToBatchND));
- ops.emplace_back(
- new BatchToSpaceND(::tflite::BuiltinOperator_BATCH_TO_SPACE_ND,
- OperatorType::kBatchToSpaceND));
- ops.emplace_back(new Concatenation(::tflite::BuiltinOperator_CONCATENATION,
- OperatorType::kConcatenation));
- ops.emplace_back(
- new Convolution(::tflite::BuiltinOperator_CONV_2D, OperatorType::kConv));
- ops.emplace_back(
- new DepthwiseConvolution(::tflite::BuiltinOperator_DEPTHWISE_CONV_2D,
- OperatorType::kDepthwiseConv));
- ops.emplace_back(new FullyConnected(::tflite::BuiltinOperator_FULLY_CONNECTED,
- OperatorType::kFullyConnected));
- ops.emplace_back(
- new Gather(::tflite::BuiltinOperator_GATHER, OperatorType::kGather));
- ops.emplace_back(
- new L2Normalization(::tflite::BuiltinOperator_L2_NORMALIZATION,
- OperatorType::kL2Normalization));
- ops.emplace_back(
- new L2Pool(::tflite::BuiltinOperator_L2_POOL_2D, OperatorType::kL2Pool));
- ops.emplace_back(new LocalResponseNormalization(
+ ops.push_back(
+ MakeUnique<Add>(::tflite::BuiltinOperator_ADD, OperatorType::kAdd));
+ ops.push_back(
+ MakeUnique<Div>(::tflite::BuiltinOperator_DIV, OperatorType::kDiv));
+ ops.push_back(
+ MakeUnique<Sub>(::tflite::BuiltinOperator_SUB, OperatorType::kSub));
+ ops.push_back(MakeUnique<AveragePool>(
+ ::tflite::BuiltinOperator_AVERAGE_POOL_2D, OperatorType::kAveragePool));
+ ops.push_back(
+ MakeUnique<SpaceToBatchND>(::tflite::BuiltinOperator_SPACE_TO_BATCH_ND,
+ OperatorType::kSpaceToBatchND));
+ ops.push_back(
+ MakeUnique<BatchToSpaceND>(::tflite::BuiltinOperator_BATCH_TO_SPACE_ND,
+ OperatorType::kBatchToSpaceND));
+ ops.push_back(MakeUnique<Concatenation>(
+ ::tflite::BuiltinOperator_CONCATENATION, OperatorType::kConcatenation));
+ ops.push_back(MakeUnique<Convolution>(::tflite::BuiltinOperator_CONV_2D,
+ OperatorType::kConv));
+ ops.push_back(MakeUnique<DepthwiseConvolution>(
+ ::tflite::BuiltinOperator_DEPTHWISE_CONV_2D,
+ OperatorType::kDepthwiseConv));
+ ops.push_back(
+ MakeUnique<FullyConnected>(::tflite::BuiltinOperator_FULLY_CONNECTED,
+ OperatorType::kFullyConnected));
+ ops.push_back(MakeUnique<Gather>(::tflite::BuiltinOperator_GATHER,
+ OperatorType::kGather));
+ ops.push_back(
+ MakeUnique<L2Normalization>(::tflite::BuiltinOperator_L2_NORMALIZATION,
+ OperatorType::kL2Normalization));
+ ops.push_back(MakeUnique<L2Pool>(::tflite::BuiltinOperator_L2_POOL_2D,
+ OperatorType::kL2Pool));
+ ops.push_back(MakeUnique<LocalResponseNormalization>(
::tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION,
OperatorType::kLocalResponseNormalization));
- ops.emplace_back(new MaxPool(::tflite::BuiltinOperator_MAX_POOL_2D,
- OperatorType::kMaxPool));
- ops.emplace_back(new Mul(::tflite::BuiltinOperator_MUL, OperatorType::kMul));
- ops.emplace_back(new Pad(::tflite::BuiltinOperator_PAD, OperatorType::kPad));
- ops.emplace_back(
- new PadV2(::tflite::BuiltinOperator_PADV2, OperatorType::kPadV2));
- ops.emplace_back(
- new Reshape(::tflite::BuiltinOperator_RESHAPE, OperatorType::kReshape));
- ops.emplace_back(
- new Softmax(::tflite::BuiltinOperator_SOFTMAX, OperatorType::kSoftmax));
- ops.emplace_back(new SpaceToDepth(::tflite::BuiltinOperator_SPACE_TO_DEPTH,
- OperatorType::kSpaceToDepth));
- ops.emplace_back(
- new Svdf(::tflite::BuiltinOperator_SVDF, OperatorType::kSvdf));
- ops.emplace_back(new Transpose(::tflite::BuiltinOperator_TRANSPOSE,
- OperatorType::kTranspose));
- ops.emplace_back(
- new Mean(::tflite::BuiltinOperator_MEAN, OperatorType::kMean));
- ops.emplace_back(new Sum(::tflite::BuiltinOperator_SUM, OperatorType::kSum));
- ops.emplace_back(new ReduceProd(::tflite::BuiltinOperator_REDUCE_PROD,
- OperatorType::kReduceProd));
- ops.emplace_back(new ReduceMax(::tflite::BuiltinOperator_REDUCE_MAX,
- OperatorType::kReduceMax));
- ops.emplace_back(new ResizeBilinear(::tflite::BuiltinOperator_RESIZE_BILINEAR,
- OperatorType::kResizeBilinear));
- ops.emplace_back(
- new Squeeze(::tflite::BuiltinOperator_SQUEEZE, OperatorType::kSqueeze));
- ops.emplace_back(
- new Split(::tflite::BuiltinOperator_SPLIT, OperatorType::kSplit));
- ops.emplace_back(new StridedSlice(::tflite::BuiltinOperator_STRIDED_SLICE,
- OperatorType::kStridedSlice));
- ops.emplace_back(
- new TopK_V2(::tflite::BuiltinOperator_TOPK_V2, OperatorType::kTopK_V2));
- ops.emplace_back(
- new Lstm(::tflite::BuiltinOperator_LSTM, OperatorType::kLstmCell));
- ops.emplace_back(
- new Cast(::tflite::BuiltinOperator_CAST, OperatorType::kCast));
- ops.emplace_back(
- new ArgMax(::tflite::BuiltinOperator_ARG_MAX, OperatorType::kArgMax));
- ops.emplace_back(
- new ArgMin(::tflite::BuiltinOperator_ARG_MIN, OperatorType::kArgMin));
- ops.emplace_back(
- new Tile(::tflite::BuiltinOperator_TILE, OperatorType::kTile));
- ops.emplace_back(new ExpandDims(::tflite::BuiltinOperator_EXPAND_DIMS,
- OperatorType::kExpandDims));
- ops.emplace_back(new TransposeConv(::tflite::BuiltinOperator_TRANSPOSE_CONV,
- OperatorType::kTransposeConv));
- ops.emplace_back(new SparseToDense(::tflite::BuiltinOperator_SPARSE_TO_DENSE,
- OperatorType::kSparseToDense));
- ops.emplace_back(
- new Shape(::tflite::BuiltinOperator_SHAPE, OperatorType::kShape));
- ops.emplace_back(new FakeQuant(::tflite::BuiltinOperator_FAKE_QUANT,
- OperatorType::kFakeQuant));
- ops.emplace_back(
- new Pack(::tflite::BuiltinOperator_PACK, OperatorType::kPack));
- ops.emplace_back(
- new OneHot(::tflite::BuiltinOperator_ONE_HOT, OperatorType::kOneHot));
+ ops.push_back(MakeUnique<MaxPool>(::tflite::BuiltinOperator_MAX_POOL_2D,
+ OperatorType::kMaxPool));
+ ops.push_back(
+ MakeUnique<Mul>(::tflite::BuiltinOperator_MUL, OperatorType::kMul));
+ ops.push_back(
+ MakeUnique<Pad>(::tflite::BuiltinOperator_PAD, OperatorType::kPad));
+ ops.push_back(
+ MakeUnique<PadV2>(::tflite::BuiltinOperator_PADV2, OperatorType::kPadV2));
+ ops.push_back(MakeUnique<Reshape>(::tflite::BuiltinOperator_RESHAPE,
+ OperatorType::kReshape));
+ ops.push_back(MakeUnique<Softmax>(::tflite::BuiltinOperator_SOFTMAX,
+ OperatorType::kSoftmax));
+ ops.push_back(MakeUnique<SpaceToDepth>(
+ ::tflite::BuiltinOperator_SPACE_TO_DEPTH, OperatorType::kSpaceToDepth));
+ ops.push_back(
+ MakeUnique<Svdf>(::tflite::BuiltinOperator_SVDF, OperatorType::kSvdf));
+ ops.push_back(MakeUnique<Transpose>(::tflite::BuiltinOperator_TRANSPOSE,
+ OperatorType::kTranspose));
+ ops.push_back(
+ MakeUnique<Mean>(::tflite::BuiltinOperator_MEAN, OperatorType::kMean));
+ ops.push_back(
+ MakeUnique<Sum>(::tflite::BuiltinOperator_SUM, OperatorType::kSum));
+ ops.push_back(MakeUnique<ReduceProd>(::tflite::BuiltinOperator_REDUCE_PROD,
+ OperatorType::kReduceProd));
+ ops.push_back(MakeUnique<ReduceMax>(::tflite::BuiltinOperator_REDUCE_MAX,
+ OperatorType::kReduceMax));
+ ops.push_back(
+ MakeUnique<ResizeBilinear>(::tflite::BuiltinOperator_RESIZE_BILINEAR,
+ OperatorType::kResizeBilinear));
+ ops.push_back(MakeUnique<Squeeze>(::tflite::BuiltinOperator_SQUEEZE,
+ OperatorType::kSqueeze));
+ ops.push_back(
+ MakeUnique<Split>(::tflite::BuiltinOperator_SPLIT, OperatorType::kSplit));
+ ops.push_back(MakeUnique<StridedSlice>(
+ ::tflite::BuiltinOperator_STRIDED_SLICE, OperatorType::kStridedSlice));
+ ops.push_back(MakeUnique<TopK_V2>(::tflite::BuiltinOperator_TOPK_V2,
+ OperatorType::kTopK_V2));
+ ops.push_back(MakeUnique<Lstm>(::tflite::BuiltinOperator_LSTM,
+ OperatorType::kLstmCell));
+ ops.push_back(
+ MakeUnique<Cast>(::tflite::BuiltinOperator_CAST, OperatorType::kCast));
+ ops.push_back(MakeUnique<ArgMax>(::tflite::BuiltinOperator_ARG_MAX,
+ OperatorType::kArgMax));
+ ops.push_back(MakeUnique<ArgMin>(::tflite::BuiltinOperator_ARG_MIN,
+ OperatorType::kArgMin));
+ ops.push_back(
+ MakeUnique<Tile>(::tflite::BuiltinOperator_TILE, OperatorType::kTile));
+ ops.push_back(MakeUnique<ExpandDims>(::tflite::BuiltinOperator_EXPAND_DIMS,
+ OperatorType::kExpandDims));
+ ops.push_back(MakeUnique<TransposeConv>(
+ ::tflite::BuiltinOperator_TRANSPOSE_CONV, OperatorType::kTransposeConv));
+ ops.push_back(MakeUnique<SparseToDense>(
+ ::tflite::BuiltinOperator_SPARSE_TO_DENSE, OperatorType::kSparseToDense));
+ ops.push_back(
+ MakeUnique<Shape>(::tflite::BuiltinOperator_SHAPE, OperatorType::kShape));
+ ops.push_back(MakeUnique<FakeQuant>(::tflite::BuiltinOperator_FAKE_QUANT,
+ OperatorType::kFakeQuant));
+ ops.push_back(
+ MakeUnique<Pack>(::tflite::BuiltinOperator_PACK, OperatorType::kPack));
+ ops.push_back(MakeUnique<OneHot>(::tflite::BuiltinOperator_ONE_HOT,
+ OperatorType::kOneHot));
// Custom Operators.
- ops.emplace_back(
- new DepthToSpace("DEPTH_TO_SPACE", OperatorType::kDepthToSpace));
- ops.emplace_back(new CTCBeamSearchDecoder(
+ ops.push_back(
+ MakeUnique<DepthToSpace>("DEPTH_TO_SPACE", OperatorType::kDepthToSpace));
+ ops.push_back(MakeUnique<CTCBeamSearchDecoder>(
"CTC_BEAM_SEARCH_DECODER", OperatorType::kCTCBeamSearchDecoder));
- ops.emplace_back(new TensorFlowUnsupported("TENSORFLOW_UNSUPPORTED",
- OperatorType::kUnsupported));
+ ops.push_back(MakeUnique<TensorFlowUnsupported>("TENSORFLOW_UNSUPPORTED",
+ OperatorType::kUnsupported));
// There operators are supported by Toco, but not by TF Lite, and has no
// attributes.
- ops.emplace_back(
- new SimpleOperator<AddNOperator>("ADDN", OperatorType::kAddN));
+ ops.push_back(
+ MakeUnique<SimpleOperator<AddNOperator>>("ADDN", OperatorType::kAddN));
// Simple Operators.
- ops.emplace_back(new SimpleOperator<DequantizeOperator>(
+ ops.push_back(MakeUnique<SimpleOperator<DequantizeOperator>>(
"DEQUANTIZE", OperatorType::kDequantize));
- ops.emplace_back(
- new SimpleOperator<FloorOperator>("FLOOR", OperatorType::kFloor));
- ops.emplace_back(
- new SimpleOperator<ReluOperator>("RELU", OperatorType::kRelu));
- ops.emplace_back(
- new SimpleOperator<Relu1Operator>("RELU_N1_TO_1", OperatorType::kRelu1));
- ops.emplace_back(
- new SimpleOperator<Relu6Operator>("RELU6", OperatorType::kRelu6));
- ops.emplace_back(
- new SimpleOperator<PReluOperator>("PRELU", OperatorType::kPRelu));
- ops.emplace_back(new SimpleOperator<LogisticOperator>(
+ ops.push_back(
+ MakeUnique<SimpleOperator<FloorOperator>>("FLOOR", OperatorType::kFloor));
+ ops.push_back(
+ MakeUnique<SimpleOperator<ReluOperator>>("RELU", OperatorType::kRelu));
+ ops.push_back(MakeUnique<SimpleOperator<Relu1Operator>>(
+ "RELU_N1_TO_1", OperatorType::kRelu1));
+ ops.push_back(
+ MakeUnique<SimpleOperator<Relu6Operator>>("RELU6", OperatorType::kRelu6));
+ ops.push_back(
+ MakeUnique<SimpleOperator<PReluOperator>>("PRELU", OperatorType::kPRelu));
+ ops.push_back(MakeUnique<SimpleOperator<LogisticOperator>>(
"LOGISTIC", OperatorType::kLogistic));
- ops.emplace_back(
- new SimpleOperator<TanhOperator>("TANH", OperatorType::kTanh));
- ops.emplace_back(new SimpleOperator<ExpOperator>("EXP", OperatorType::kExp));
- ops.emplace_back(new SimpleOperator<LogSoftmaxOperator>(
+ ops.push_back(
+ MakeUnique<SimpleOperator<TanhOperator>>("TANH", OperatorType::kTanh));
+ ops.push_back(
+ MakeUnique<SimpleOperator<ExpOperator>>("EXP", OperatorType::kExp));
+ ops.push_back(MakeUnique<SimpleOperator<LogSoftmaxOperator>>(
"LOG_SOFTMAX", OperatorType::kLogSoftmax));
- ops.emplace_back(new SimpleOperator<TensorFlowMaximumOperator>(
+ ops.push_back(MakeUnique<SimpleOperator<TensorFlowMaximumOperator>>(
"MAXIMUM", OperatorType::kMaximum)); // Element-wise Maximum
- ops.emplace_back(new SimpleOperator<TensorFlowMinimumOperator>(
+ ops.push_back(MakeUnique<SimpleOperator<TensorFlowMinimumOperator>>(
"MINIMUM", OperatorType::kMinimum)); // Element-wise Minimum
- ops.emplace_back(new SimpleOperator<TensorFlowGreaterOperator>(
+ ops.push_back(MakeUnique<SimpleOperator<TensorFlowGreaterOperator>>(
"GREATER", OperatorType::kGreater));
- ops.emplace_back(new SimpleOperator<TensorFlowGreaterEqualOperator>(
+ ops.push_back(MakeUnique<SimpleOperator<TensorFlowGreaterEqualOperator>>(
"GREATER_EQUAL", OperatorType::kGreaterEqual));
- ops.emplace_back(
- new SimpleOperator<TensorFlowLessOperator>("LESS", OperatorType::kLess));
- ops.emplace_back(new SimpleOperator<TensorFlowLessEqualOperator>(
+ ops.push_back(MakeUnique<SimpleOperator<TensorFlowLessOperator>>(
+ "LESS", OperatorType::kLess));
+ ops.push_back(MakeUnique<SimpleOperator<TensorFlowLessEqualOperator>>(
"LESS_EQUAL", OperatorType::kLessEqual));
- ops.emplace_back(new SimpleOperator<TensorFlowEqualOperator>(
+ ops.push_back(MakeUnique<SimpleOperator<TensorFlowEqualOperator>>(
"EQUAL", OperatorType::kEqual));
- ops.emplace_back(new SimpleOperator<TensorFlowNotEqualOperator>(
+ ops.push_back(MakeUnique<SimpleOperator<TensorFlowNotEqualOperator>>(
"NOT_EQUAL", OperatorType::kNotEqual));
- ops.emplace_back(new SimpleOperator<NegOperator>("NEG", OperatorType::kNeg));
- ops.emplace_back(
- new SimpleOperator<SelectOperator>("SELECT", OperatorType::kSelect));
- ops.emplace_back(
- new SimpleOperator<SliceOperator>("SLICE", OperatorType::kSlice));
- ops.emplace_back(new SimpleOperator<PowOperator>("POW", OperatorType::kPow));
- ops.emplace_back(new SimpleOperator<LogicalOrOperator>(
+ ops.push_back(
+ MakeUnique<SimpleOperator<NegOperator>>("NEG", OperatorType::kNeg));
+ ops.push_back(MakeUnique<SimpleOperator<SelectOperator>>(
+ "SELECT", OperatorType::kSelect));
+ ops.push_back(
+ MakeUnique<SimpleOperator<SliceOperator>>("SLICE", OperatorType::kSlice));
+ ops.push_back(
+ MakeUnique<SimpleOperator<PowOperator>>("POW", OperatorType::kPow));
+ ops.push_back(MakeUnique<SimpleOperator<LogicalOrOperator>>(
"LOGICAL_OR", OperatorType::kLogicalOr));
ops.emplace_back(new SimpleOperator<LogicalAndOperator>(
"LOGICAL_AND", OperatorType::kLogicalAnd));
ops.emplace_back(new SimpleOperator<LogicalNotOperator>(
"LOGICAL_NOT", OperatorType::kLogicalNot));
// Element-wise operator
- ops.emplace_back(new SimpleOperator<SinOperator>("SIN", OperatorType::kSin));
- ops.emplace_back(new SimpleOperator<LogOperator>("LOG", OperatorType::kLog));
- ops.emplace_back(
- new SimpleOperator<TensorFlowSqrtOperator>("SQRT", OperatorType::kSqrt));
- ops.emplace_back(new SimpleOperator<TensorFlowRsqrtOperator>(
+ ops.push_back(
+ MakeUnique<SimpleOperator<SinOperator>>("SIN", OperatorType::kSin));
+ ops.push_back(
+ MakeUnique<SimpleOperator<LogOperator>>("LOG", OperatorType::kLog));
+ ops.push_back(MakeUnique<SimpleOperator<TensorFlowSqrtOperator>>(
+ "SQRT", OperatorType::kSqrt));
+ ops.push_back(MakeUnique<SimpleOperator<TensorFlowRsqrtOperator>>(
"RSQRT", OperatorType::kRsqrt));
return ops;
diff --git a/tensorflow/core/grappler/optimizers/meta_optimizer.cc b/tensorflow/core/grappler/optimizers/meta_optimizer.cc
index 96f6fe1e0b..e42a7807e4 100644
--- a/tensorflow/core/grappler/optimizers/meta_optimizer.cc
+++ b/tensorflow/core/grappler/optimizers/meta_optimizer.cc
@@ -35,6 +35,7 @@ limitations under the License.
#include "tensorflow/core/grappler/utils/functions.h"
#include "tensorflow/core/grappler/utils/topological_sort.h"
#include "tensorflow/core/lib/core/status.h"
+#include "tensorflow/core/util/ptr_util.h"
namespace tensorflow {
namespace grappler {
@@ -102,57 +103,57 @@ std::unique_ptr<GraphOptimizer> MetaOptimizer::MakeNewOptimizer(
Status MetaOptimizer::InitializeOptimizers(
std::vector<std::unique_ptr<GraphOptimizer>>* optimizers) const {
if (!cfg_.disable_model_pruning()) {
- optimizers->emplace_back(new ModelPruner());
+ optimizers->push_back(MakeUnique<ModelPruner>());
}
if (cfg_.function_optimization() != RewriterConfig::OFF) {
- optimizers->emplace_back(
- new FunctionOptimizer(cfg_.function_optimization()));
+ optimizers->push_back(
+ MakeUnique<FunctionOptimizer>(cfg_.function_optimization()));
}
if (cfg_.debug_stripper() == RewriterConfig::ON) {
- optimizers->emplace_back(new DebugStripper());
+ optimizers->push_back(MakeUnique<DebugStripper>());
}
if (cfg_.constant_folding() != RewriterConfig::OFF) {
- optimizers->emplace_back(
- new ConstantFolding(cfg_.constant_folding(), cpu_device_));
+ optimizers->push_back(
+ MakeUnique<ConstantFolding>(cfg_.constant_folding(), cpu_device_));
}
if (cfg_.shape_optimization() != RewriterConfig::OFF) {
- optimizers->emplace_back(new ShapeOptimizer());
+ optimizers->push_back(MakeUnique<ShapeOptimizer>());
}
if (cfg_.remapping() != RewriterConfig::OFF) {
- optimizers->emplace_back(new Remapper(cfg_.remapping()));
+ optimizers->push_back(MakeUnique<Remapper>(cfg_.remapping()));
}
if (cfg_.arithmetic_optimization() != RewriterConfig::OFF) {
- optimizers->emplace_back(
- new ArithmeticOptimizer(cfg_.arithmetic_optimization()));
+ optimizers->push_back(
+ MakeUnique<ArithmeticOptimizer>(cfg_.arithmetic_optimization()));
}
if (cfg_.loop_optimization() != RewriterConfig::OFF) {
- optimizers->emplace_back(
- new LoopOptimizer(cfg_.loop_optimization(), cpu_device_));
+ optimizers->push_back(
+ MakeUnique<LoopOptimizer>(cfg_.loop_optimization(), cpu_device_));
}
if (cfg_.dependency_optimization() != RewriterConfig::OFF) {
- optimizers->emplace_back(
- new DependencyOptimizer(cfg_.dependency_optimization()));
+ optimizers->push_back(
+ MakeUnique<DependencyOptimizer>(cfg_.dependency_optimization()));
}
if (cfg_.layout_optimizer() != RewriterConfig::OFF) {
- optimizers->emplace_back(new LayoutOptimizer());
+ optimizers->push_back(MakeUnique<LayoutOptimizer>());
}
if (cfg_.memory_optimization() != RewriterConfig::NO_MEM_OPT) {
if (cfg_.memory_optimizer_target_node_name_scope().empty()) {
- optimizers->emplace_back(
+ optimizers->push_back(
// Use the default target node name prefix "gradients/"
- new MemoryOptimizer(cfg_.memory_optimization()));
+ MakeUnique<MemoryOptimizer>(cfg_.memory_optimization()));
} else {
- optimizers->emplace_back(
- new MemoryOptimizer(cfg_.memory_optimization(),
- cfg_.memory_optimizer_target_node_name_scope()));
+ optimizers->push_back(MakeUnique<MemoryOptimizer>(
+ cfg_.memory_optimization(),
+ cfg_.memory_optimizer_target_node_name_scope()));
}
}
if (cfg_.auto_parallel().enable()) {
- optimizers->emplace_back(
- new AutoParallel(cfg_.auto_parallel().num_replicas()));
+ optimizers->push_back(
+ MakeUnique<AutoParallel>(cfg_.auto_parallel().num_replicas()));
}
if (cfg_.scoped_allocator_optimization()) {
- optimizers->emplace_back(new ScopedAllocatorOptimizer(
+ optimizers->push_back(MakeUnique<ScopedAllocatorOptimizer>(
cfg_.scoped_allocator_optimization(), cfg_.scoped_allocator_opts()));
}
return Status::OK();
diff --git a/tensorflow/core/kernels/data/shuffle_dataset_op.cc b/tensorflow/core/kernels/data/shuffle_dataset_op.cc
index b859295fa4..41395476eb 100644
--- a/tensorflow/core/kernels/data/shuffle_dataset_op.cc
+++ b/tensorflow/core/kernels/data/shuffle_dataset_op.cc
@@ -22,6 +22,7 @@ limitations under the License.
#include "tensorflow/core/lib/random/philox_random.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/lib/random/random_distributions.h"
+#include "tensorflow/core/util/ptr_util.h"
namespace tensorflow {
@@ -75,7 +76,7 @@ class ShuffleDatasetOpBase : public UnaryDatasetOpKernel {
parent_generator_(seed, seed2),
generator_(&parent_generator_) {
buffer_.reset(new std::vector<Tensor>[params.dataset->buffer_size_]);
- slices_.emplace_back(new Slice{0, 0});
+ slices_.push_back(MakeUnique<Slice>(0, 0));
}
Status GetNextInternal(IteratorContext* ctx,
@@ -118,7 +119,7 @@ class ShuffleDatasetOpBase : public UnaryDatasetOpKernel {
}
epoch_++;
int64 n = slices_.back()->end;
- slices_.emplace_back(new Slice{n, n});
+ slices_.push_back(MakeUnique<Slice>(n, n));
TF_RETURN_IF_ERROR(this->dataset()->input_->MakeIterator(
ctx, this->prefix(), &input_impl_));
}
@@ -251,7 +252,7 @@ class ShuffleDatasetOpBase : public UnaryDatasetOpKernel {
int64 end;
TF_RETURN_IF_ERROR(reader->ReadScalar(
this->full_name(strings::StrCat("slices_end_", i)), &end));
- slices_.emplace_back(new Slice{start, end});
+ slices_.push_back(MakeUnique<Slice>(start, end));
for (size_t j = start; j < end; ++j) {
size_t index = j % this->dataset()->buffer_size_;
int64 list_size;
diff --git a/tensorflow/core/kernels/tensor_array_ops.cc b/tensorflow/core/kernels/tensor_array_ops.cc
index 5aa5d20b1a..b368ffc875 100644
--- a/tensorflow/core/kernels/tensor_array_ops.cc
+++ b/tensorflow/core/kernels/tensor_array_ops.cc
@@ -40,6 +40,7 @@ limitations under the License.
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/ptr_util.h"
typedef Eigen::ThreadPoolDevice CPUDevice;
#if GOOGLE_CUDA
@@ -683,7 +684,7 @@ class TensorArrayPackOrGatherOp : public OpKernel {
output_tensor->shaped<T, 2>({1, output_shape.num_elements()});
// Insert the first value
- input_tensors_flat.emplace_back(new ConstMatrix(
+ input_tensors_flat.push_back(MakeUnique<ConstMatrix>(
value_0_t->shaped<T, 2>({1, value_0_t->NumElements()})));
for (int i = 1; i < num_indices; ++i) {
@@ -694,8 +695,8 @@ class TensorArrayPackOrGatherOp : public OpKernel {
"TensorArray has inconsistent shapes. Index 0 has shape: ",
value_0_t->shape().DebugString(), " but index ", i,
" has shape: ", value_t->shape().DebugString()));
- input_tensors_flat.emplace_back(
- new ConstMatrix(value_t->shaped<T, 2>({1, value_t->NumElements()})));
+ input_tensors_flat.push_back(MakeUnique<ConstMatrix>(
+ value_t->shaped<T, 2>({1, value_t->NumElements()})));
}
#if GOOGLE_CUDA
@@ -922,7 +923,7 @@ class TensorArrayConcatOp : public OpKernel {
for (size_t i = 0; i < values.size(); ++i) {
const Tensor* value_t = value_tensors[i];
if (value_t->NumElements() > 0) {
- input_tensors_flat.emplace_back(new ConstMatrix(
+ input_tensors_flat.push_back(MakeUnique<ConstMatrix>(
value_t->shaped<T, 2>({1, value_t->NumElements()})));
}
}