author    A. Unique TensorFlower <gardener@tensorflow.org>    2018-01-12 13:20:27 -0800
committer TensorFlower Gardener <gardener@tensorflow.org>     2018-01-12 13:24:39 -0800
commit    5a814028e2c0531b9936ac3286ab43e3670f36c6 (patch)
tree      8437a60bf74cad9559ef9dcb7e39e9e10e1586d7 /tensorflow/python/profiler
parent    ce10f988fc5417b1df17bb2905dc77943136db85 (diff)
tfprof support for eager.
PiperOrigin-RevId: 181784078
Diffstat (limited to 'tensorflow/python/profiler')
-rw-r--r--  tensorflow/python/profiler/model_analyzer.py        85
-rw-r--r--  tensorflow/python/profiler/model_analyzer_test.py   21
-rw-r--r--  tensorflow/python/profiler/tfprof_logger.py         15
3 files changed, 86 insertions, 35 deletions
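With this change, model_analyzer.Profiler can be constructed without an explicit graph and fed run metadata exported from eager execution. A minimal sketch of the new usage, adapted from the test added below (the profiled computation and option choices are illustrative, not part of the commit):

from tensorflow.python.eager import context
from tensorflow.python.profiler import model_analyzer
from tensorflow.python.profiler import option_builder

with context.eager_mode():
  context.enable_run_metadata()
  # ... run the eager computation to be profiled ...
  profiler = model_analyzer.Profiler()  # graph now defaults to None
  profiler.add_step(0, context.export_run_metadata())
  context.disable_run_metadata()

  # Same option pattern as in model_analyzer_test.py below.
  opts = option_builder.ProfileOptionBuilder(
      option_builder.ProfileOptionBuilder.time_and_memory()).build()
  profiler.profile_operations(opts)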
diff --git a/tensorflow/python/profiler/model_analyzer.py b/tensorflow/python/profiler/model_analyzer.py
index f5caeba518..8f78054560 100644
--- a/tensorflow/python/profiler/model_analyzer.py
+++ b/tensorflow/python/profiler/model_analyzer.py
@@ -28,7 +28,9 @@ from google.protobuf import message
from tensorflow.core.profiler import tfprof_options_pb2
from tensorflow.core.profiler import tfprof_output_pb2
from tensorflow.python import pywrap_tensorflow as print_mdl
+from tensorflow.python.eager import context
from tensorflow.python.framework import errors
+from tensorflow.python.framework import ops
from tensorflow.python.profiler import option_builder
from tensorflow.python.profiler import tfprof_logger
@@ -45,6 +47,14 @@ ALL_ADVICE = {
}
+def _graph_string(graph):
+ """Helper to serialize a graph to string."""
+ if graph:
+ return graph.as_graph_def(add_shapes=True).SerializeToString()
+ else:
+ return b''
+
+
def _build_options(options):
"""Build tfprof.OptionsProto.
@@ -151,24 +161,25 @@ class Profiler(object):
```
"""
- def __init__(self, graph, op_log=None):
+ def __init__(self, graph=None, op_log=None):
"""Constructor.
Args:
- graph: tf.Graph.
+ graph: tf.Graph. If None and eager execution is not enabled, use
+ default graph.
op_log: optional. tensorflow::tfprof::OpLogProto proto. Used to define
extra op types.
"""
+ if not graph and context.in_graph_mode():
+ graph = ops.get_default_graph()
self._coverage = 0.0
self._graph = graph
# pylint: disable=protected-access
op_log = tfprof_logger.merge_default_with_oplog(
self._graph, op_log=op_log)
# pylint: enable=protected-access
-
print_mdl.NewProfiler(
- self._graph.as_graph_def(add_shapes=True).SerializeToString(),
- op_log.SerializeToString())
+ _graph_string(self._graph), op_log.SerializeToString())
def __del__(self):
print_mdl.DeleteProfiler()
@@ -187,10 +198,9 @@ class Profiler(object):
self._graph, run_meta=run_meta)
# pylint: enable=protected-access
# TODO(xpan): P1: Better to find the current graph.
- self._coverage = print_mdl.AddStep(
- step,
- self._graph.as_graph_def(add_shapes=True).SerializeToString(),
- run_meta.SerializeToString(), op_log.SerializeToString())
+ self._coverage = print_mdl.AddStep(step, _graph_string(self._graph),
+ run_meta.SerializeToString(),
+ op_log.SerializeToString())
def profile_python(self, options):
"""Profile the statistics of the Python code.
@@ -278,12 +288,23 @@ class Profiler(object):
print_mdl.Profile('advise'.encode('utf-8'), opts.SerializeToString()))
return advise_pb
+ def serialize_to_string(self):
+ """Serialize the ProfileProto to a binary string.
+
+ Users can write it to a file for offline analysis with the tfprof command line
+ or the graphical interface.
+
+ Returns:
+ ProfileProto binary string.
+ """
+ return print_mdl.SerializeToString()
+
def _write_profile(self, filename):
"""Writes the profile to a file."""
print_mdl.WriteProfile(filename)
-def profile(graph,
+def profile(graph=None,
run_meta=None,
op_log=None,
cmd='scope',
@@ -294,7 +315,8 @@ def profile(graph,
https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/README.md
Args:
- graph: required tf.Graph.
+ graph: tf.Graph. If None and eager execution is not enabled, use
+ default graph.
run_meta: optional tensorflow.RunMetadata proto. It is necessary to
support run time information profiling, such as time and memory.
op_log: tensorflow.tfprof.OpLogProto proto. User can assign "types" to
@@ -311,10 +333,12 @@ def profile(graph,
If cmd is 'op' or 'code', returns MultiGraphNodeProto proto.
Side effect: stdout/file/timeline.json depending on options['output']
"""
+ if not graph and context.in_graph_mode():
+ graph = ops.get_default_graph()
+
if options == _DEFAULT_PROFILE_OPTIONS:
options = (option_builder.ProfileOptionBuilder
.trainable_variables_parameter())
-
# pylint: disable=protected-access
op_log = tfprof_logger.merge_default_with_oplog(
graph, op_log, run_meta, add_trace=cmd == 'code')
@@ -324,14 +348,14 @@ def profile(graph,
run_meta_str = run_meta.SerializeToString() if run_meta else b''
+ graph_str = _graph_string(graph)
+
if cmd == 'code' or cmd == 'op':
tfprof_node = tfprof_output_pb2.MultiGraphNodeProto()
- ret = print_mdl.PrintModelAnalysis(
- graph.as_graph_def(add_shapes=True).SerializeToString(),
- run_meta_str,
- op_log.SerializeToString(),
- cmd.encode('utf-8'),
- opts.SerializeToString())
+ ret = print_mdl.PrintModelAnalysis(graph_str, run_meta_str,
+ op_log.SerializeToString(),
+ cmd.encode('utf-8'),
+ opts.SerializeToString())
try:
tfprof_node.ParseFromString(ret)
except message.DecodeError as e:
@@ -339,12 +363,10 @@ def profile(graph,
elif cmd == 'graph' or cmd == 'scope':
tfprof_node = tfprof_output_pb2.GraphNodeProto()
- ret = print_mdl.PrintModelAnalysis(
- graph.as_graph_def(add_shapes=True).SerializeToString(),
- run_meta_str,
- op_log.SerializeToString(),
- cmd.encode('utf-8'),
- opts.SerializeToString())
+ ret = print_mdl.PrintModelAnalysis(graph_str, run_meta_str,
+ op_log.SerializeToString(),
+ cmd.encode('utf-8'),
+ opts.SerializeToString())
try:
tfprof_node.ParseFromString(ret)
except message.DecodeError as e:
@@ -356,7 +378,7 @@ def profile(graph,
return tfprof_node
-def advise(graph, run_meta=None, options=_DEFAULT_ADVISE_OPTIONS):
+def advise(graph=None, run_meta=None, options=_DEFAULT_ADVISE_OPTIONS):
"""Auto profile and advise.
Builds profiles and automatically checks anomalies of various
@@ -364,13 +386,17 @@ def advise(graph, run_meta=None, options=_DEFAULT_ADVISE_OPTIONS):
https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/README.md
Args:
- graph: required tf.Graph.
+ graph: tf.Graph. If None and eager execution is not enabled, use
+ default graph.
run_meta: optional tensorflow.RunMetadata proto. It is necessary to
support run time information profiling, such as time and memory.
options: see ALL_ADVICE example above. Default checks everything.
Returns:
Returns AdviceProto proto
"""
+ if not graph and context.in_graph_mode():
+ graph = ops.get_default_graph()
+
if options == _DEFAULT_ADVISE_OPTIONS:
options = ALL_ADVICE.copy()
@@ -385,9 +411,6 @@ def advise(graph, run_meta=None, options=_DEFAULT_ADVISE_OPTIONS):
ret = tfprof_output_pb2.AdviceProto()
ret.ParseFromString(
print_mdl.PrintModelAnalysis(
- graph.as_graph_def(add_shapes=True).SerializeToString(),
- run_meta_str,
- op_log.SerializeToString(),
- 'advise'.encode('utf-8'),
- opts.SerializeToString()))
+ _graph_string(graph), run_meta_str, op_log.SerializeToString(),
+ 'advise'.encode('utf-8'), opts.SerializeToString()))
return ret
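The new Profiler.serialize_to_string() makes the accumulated profile portable for offline analysis. A short sketch, assuming `profiler` is a Profiler that has already recorded steps; the dump path is illustrative:

from tensorflow.core.profiler import tfprof_log_pb2
from tensorflow.python.platform import gfile

# Dump the accumulated ProfileProto for offline tfprof analysis.
with gfile.Open('/tmp/profile_dump', 'wb') as f:
  f.write(profiler.serialize_to_string())

# Later, e.g. in another process, parse it back.
profile_pb = tfprof_log_pb2.ProfileProto()
with gfile.Open('/tmp/profile_dump', 'rb') as f:
  profile_pb.ParseFromString(f.read())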
diff --git a/tensorflow/python/profiler/model_analyzer_test.py b/tensorflow/python/profiler/model_analyzer_test.py
index 0ad4e0ef17..2d6766f390 100644
--- a/tensorflow/python/profiler/model_analyzer_test.py
+++ b/tensorflow/python/profiler/model_analyzer_test.py
@@ -26,6 +26,7 @@ import re
import numpy as np
from tensorflow.core.profiler import profile_pb2
+from tensorflow.core.profiler import tfprof_log_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
@@ -766,11 +767,27 @@ class PrintModelAnalysisTest(test.TestCase):
def testEager(self):
ops.reset_default_graph()
with context.eager_mode():
+ outfile = os.path.join(test.get_temp_dir(), 'dump')
+ opts = builder(
+ builder.time_and_memory()).with_file_output(outfile).build()
context.enable_run_metadata()
lib.BuildSmallModel()
- run_meta = context.export_run_metadata()
- self.assertTrue('Conv2D' in '%s' % run_meta)
+
+ profiler = model_analyzer.Profiler()
+ profiler.add_step(0, context.export_run_metadata())
context.disable_run_metadata()
+ profiler.profile_operations(opts)
+ with gfile.Open(outfile, 'r') as f:
+ out_str = f.read()
+ self.assertTrue('Conv2D' in out_str)
+ self.assertTrue('VarHandleOp' in out_str)
+
+ with gfile.Open('/tmp/eager_profile', 'wb') as f:
+ profile_pb = tfprof_log_pb2.ProfileProto()
+ profile_pb.ParseFromString(profiler.serialize_to_string())
+ profile_pb_str = '%s' % profile_pb
+ self.assertTrue('Conv2D' in profile_pb_str)
+ self.assertTrue('VarHandleOp' in profile_pb_str)
if __name__ == '__main__':
diff --git a/tensorflow/python/profiler/tfprof_logger.py b/tensorflow/python/profiler/tfprof_logger.py
index 15c273794d..ffda7ddad7 100644
--- a/tensorflow/python/profiler/tfprof_logger.py
+++ b/tensorflow/python/profiler/tfprof_logger.py
@@ -25,6 +25,7 @@ import sys
import six
from tensorflow.core.profiler import tfprof_log_pb2
+from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import gfile
@@ -144,7 +145,8 @@ def merge_default_with_oplog(graph, op_log=None, run_meta=None,
"""Merge the tfprof default extra info with caller's op_log.
Args:
- graph: tf.Graph.
+ graph: tf.Graph. If None and eager execution is not enabled, use
+ default graph.
op_log: OpLogProto proto.
run_meta: RunMetadata proto used to complete shape information.
add_trace: Whether to add op trace information.
@@ -153,7 +155,13 @@ def merge_default_with_oplog(graph, op_log=None, run_meta=None,
Returns:
tmp_op_log: Merged OpLogProto proto.
"""
+ if not graph and context.in_graph_mode():
+ graph = ops.get_default_graph()
+
tmp_op_log = tfprof_log_pb2.OpLogProto()
+ if not graph:
+ return tmp_op_log
+
logged_ops, string_to_id = _get_logged_ops(
graph, run_meta, add_trace=add_trace, add_trainable_var=add_trainable_var)
@@ -190,7 +198,8 @@ def write_op_log(graph, log_dir, op_log=None, run_meta=None, add_trace=True):
information with best effort.
Args:
- graph: tf.Graph.
+ graph: tf.Graph. If None and eager execution is not enabled, use
+ default graph.
log_dir: directory to write the log file.
op_log: (Optional) OpLogProto proto to be written. If not provided, a new
one is created.
@@ -199,6 +208,8 @@ def write_op_log(graph, log_dir, op_log=None, run_meta=None, add_trace=True):
add_trace: Whether to add python code trace information.
Used to support "code" view.
"""
+ if not graph and context.in_graph_mode():
+ graph = ops.get_default_graph()
op_log = merge_default_with_oplog(graph, op_log, run_meta, add_trace)
with gfile.Open(os.path.join(log_dir, 'tfprof_log'), 'w') as log:
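With the tfprof_logger changes, merge_default_with_oplog() and write_op_log() fall back to the default graph when called with graph=None in graph mode, and merge_default_with_oplog() returns an empty OpLogProto when no graph is available. A minimal sketch of the relaxed call, with an illustrative log directory:

from tensorflow.python.profiler import tfprof_logger

# In graph mode, graph=None now resolves to ops.get_default_graph();
# the log directory is illustrative and must already exist.
tfprof_logger.write_op_log(graph=None, log_dir='/tmp/tfprof_logdir')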