diff options
author | Sami Kama <skama@nvidia.com> | 2018-01-19 22:58:50 +0000 |
---|---|---|
committer | Sami Kama <skama@nvidia.com> | 2018-01-19 22:58:50 +0000 |
commit | 825e7a32e9f4dbad21a9ddb9d8a34bd3e32b1d0e (patch) | |
tree | ab8f8406065cac5ada579b571a1ebcc6338f5365 /tensorflow/contrib/tensorrt/ops | |
parent | e810b107d81a0016417b100bd89fd53e065e8d14 (diff) |
Introducing TensorRT Operator to TF which can run (sub)graphs in
highly optimized TensorRT engines. This commit is a merged version of
many commits by
benbarsdell <bbarsdell at nvidia.com>
deadeyegoodwin <davidg at nvidia.com>
jjsjann123 <jiej at nvidia.com>
samikama <skama at nvidia.com>
Diffstat (limited to 'tensorflow/contrib/tensorrt/ops')
-rw-r--r-- | tensorflow/contrib/tensorrt/ops/trt_engine_op.cc | 37 |
1 files changed, 37 insertions, 0 deletions
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// Registration of the TRTEngineOp custom op, which executes a serialized
// TensorRT engine over its input tensors.

#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/tensor_shape.h"

namespace tensorflow {

namespace shape_inference {
// Defined in another translation unit of the TensorRT contrib module;
// declared here so the REGISTER_OP below can reference it.
extern Status TRTEngineOpShapeInference(InferenceContext* c);
}

// TRTEngineOp: runs a (sub)graph inside a pre-built TensorRT engine.
//
// Attrs:
//   serialized_engine: opaque string — presumably the serialized TensorRT
//     engine to deserialize and execute (confirm against the kernel impl).
//   input_nodes / output_nodes: lists of node-name strings — presumably the
//     original graph nodes mapped to the engine's inputs/outputs.
//   InT / OutT: element types of the input/output tensor lists; restricted
//     to {int8, float16, float32}.
//
// Inputs/Outputs:
//   in_tensor / out_tensor: variadic tensor lists typed by InT / OutT.
//
// Shape inference is delegated to TRTEngineOpShapeInference (declared above).
REGISTER_OP("TRTEngineOp")
    .Attr("serialized_engine: string")
    .Attr("input_nodes: list(string)")
    .Attr("output_nodes: list(string)")
    .Attr("InT: list({int8, float16, float32})")
    .Attr("OutT: list({int8, float16, float32})")
    .Input("in_tensor: InT")
    .Output("out_tensor: OutT")
    .SetShapeFn(shape_inference::TRTEngineOpShapeInference);

}  // namespace tensorflow