aboutsummaryrefslogtreecommitdiffhomepage
path: root/tensorflow/python/summary
diff options
context:
space:
mode:
authorGravatar Nick Felt <nickfelt@google.com>2018-04-10 23:44:12 -0700
committerGravatar TensorFlower Gardener <gardener@tensorflow.org>2018-04-10 23:46:53 -0700
commit6accb84d8437cb915e23d83673c233f5084aad68 (patch)
tree44a28a02a04b2cbcc6f3f340f8ec60e48abb8bdb /tensorflow/python/summary
parent231146433a45ca8135e132ee0b48469798ca0b1f (diff)
Create FileWriter <-> tf.contrib.summary compatibility layer
This provides an implementation of FileWriter, activated by passing in a `session` parameter to the constructor, that is backed by session.run'ing graph ops that manipulate a tf.contrib.summary.create_file_writer() instance. Because tf.contrib.summary.SummaryWriters are backed by shared resources in the graph, this makes it possible to have a FileWriter and a tf.contrib.summary.SummaryWriter that both write to the same events file. This change includes some related smaller changes: - Factors out training_utils.py into a separate target to avoid a cyclic dep - Moves contrib/summary/summary_ops.py to python/ops/summary_ops_v2.py - Adds SummaryWriter.init(), .flush(), and .close() op-returning methods - Changes create_file_writer() `name` arg to default to logdir prefixed by `logdir:` so shared resources are scoped by logdir by default - Fixes a bug with tf.contrib.summary.flush() `writer` arg - Makes create_file_writer()'s max_queue arg behave as documented - Adds more testing for existing tf.contrib.summary API PiperOrigin-RevId: 192408079
Diffstat (limited to 'tensorflow/python/summary')
-rw-r--r--tensorflow/python/summary/writer/event_file_writer_v2.py140
-rw-r--r--tensorflow/python/summary/writer/writer.py40
-rw-r--r--tensorflow/python/summary/writer/writer_test.py233
3 files changed, 357 insertions, 56 deletions
diff --git a/tensorflow/python/summary/writer/event_file_writer_v2.py b/tensorflow/python/summary/writer/event_file_writer_v2.py
new file mode 100644
index 0000000000..5c66c0f7a8
--- /dev/null
+++ b/tensorflow/python/summary/writer/event_file_writer_v2.py
@@ -0,0 +1,140 @@
+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Writes events to disk in a logdir."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import summary_ops_v2
+from tensorflow.python.platform import gfile
+
+
+class EventFileWriterV2(object):
+ """Writes `Event` protocol buffers to an event file via the graph.
+
+ The `EventFileWriterV2` class is backed by the summary file writer in the v2
+ summary API (currently in tf.contrib.summary), so it uses a shared summary
+ writer resource and graph ops to write events.
+
+ As with the original EventFileWriter, this class will asynchronously write
+ Event protocol buffers to the backing file. The Event file is encoded using
+ the tfrecord format, which is similar to RecordIO.
+ """
+
+ def __init__(self, session, logdir, max_queue=10, flush_secs=120,
+ filename_suffix=''):
+ """Creates an `EventFileWriterV2` and an event file to write to.
+
+ On construction, this calls `tf.contrib.summary.create_file_writer` within
+ the graph from `session.graph` to look up a shared summary writer resource
+ for `logdir` if one exists, and create one if not. Creating the summary
+ writer resource in turn creates a new event file in `logdir` to be filled
+ with `Event` protocol buffers passed to `add_event`. Graph ops to control
+ this writer resource are added to `session.graph` during this init call;
+ stateful methods on this class will call `session.run()` on these ops.
+
+ Note that because the underlying resource is shared, it is possible that
+ other parts of the code using the same session may interact independently
+ with the resource, e.g. by flushing or even closing it. It is the caller's
+ responsibility to avoid any undesirable sharing in this regard.
+
+ The remaining arguments to the constructor (`flush_secs`, `max_queue`, and
+ `filename_suffix`) control the construction of the shared writer resource
+ if one is created. If an existing resource is reused, these arguments have
+ no effect. See `tf.contrib.summary.create_file_writer` for details.
+
+ Args:
+ session: A `tf.Session`. Session that will hold shared writer resource.
+ The writer ops will be added to session.graph during this init call.
+ logdir: A string. Directory where event file will be written.
+ max_queue: Integer. Size of the queue for pending events and summaries.
+ flush_secs: Number. How often, in seconds, to flush the
+ pending events and summaries to disk.
+ filename_suffix: A string. Every event file's name is suffixed with
+ `filename_suffix`.
+ """
+ self._session = session
+ self._logdir = logdir
+ self._closed = False
+ if not gfile.IsDirectory(self._logdir):
+ gfile.MakeDirs(self._logdir)
+
+ with self._session.graph.as_default():
+ with ops.name_scope('filewriter'):
+ file_writer = summary_ops_v2.create_file_writer(
+ logdir=self._logdir,
+ max_queue=max_queue,
+ flush_millis=flush_secs * 1000,
+ filename_suffix=filename_suffix)
+ with summary_ops_v2.always_record_summaries(), file_writer.as_default():
+ self._event_placeholder = array_ops.placeholder_with_default(
+ constant_op.constant('unused', dtypes.string),
+ shape=[])
+ self._add_event_op = summary_ops_v2.import_event(
+ self._event_placeholder)
+ self._init_op = file_writer.init()
+ self._flush_op = file_writer.flush()
+ self._close_op = file_writer.close()
+ self._session.run(self._init_op)
+
+ def get_logdir(self):
+ """Returns the directory where event file will be written."""
+ return self._logdir
+
+ def reopen(self):
+ """Reopens the EventFileWriter.
+
+ Can be called after `close()` to add more events in the same directory.
+ The events will go into a new events file.
+
+ Does nothing if the EventFileWriter was not closed.
+ """
+ if self._closed:
+ self._closed = False
+ self._session.run(self._init_op)
+
+ def add_event(self, event):
+ """Adds an event to the event file.
+
+ Args:
+ event: An `Event` protocol buffer.
+ """
+ if not self._closed:
+ event_pb = event.SerializeToString()
+ self._session.run(
+ self._add_event_op, feed_dict={self._event_placeholder: event_pb})
+
+ def flush(self):
+ """Flushes the event file to disk.
+
+ Call this method to make sure that all pending events have been written to
+ disk.
+ """
+ self._session.run(self._flush_op)
+
+ def close(self):
+ """Flushes the event file to disk and closes the file.
+
+ Call this method when you do not need the summary writer anymore.
+ """
+ if not self._closed:
+ self.flush()
+ self._session.run(self._close_op)
+ self._closed = True
diff --git a/tensorflow/python/summary/writer/writer.py b/tensorflow/python/summary/writer/writer.py
index 57f78c156b..aca084fc91 100644
--- a/tensorflow/python/summary/writer/writer.py
+++ b/tensorflow/python/summary/writer/writer.py
@@ -32,6 +32,7 @@ from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import plugin_asset
from tensorflow.python.summary.writer.event_file_writer import EventFileWriter
+from tensorflow.python.summary.writer.event_file_writer_v2 import EventFileWriterV2
from tensorflow.python.util.tf_export import tf_export
_PLUGINS_DIR = "plugins"
@@ -286,6 +287,11 @@ class FileWriter(SummaryToEventTransformer):
file contents asynchronously. This allows a training program to call methods
to add data to the file directly from the training loop, without slowing down
training.
+
+ When constructed with a `tf.Session` parameter, a `FileWriter` instead forms
+ a compatibility layer over new graph-based summaries (`tf.contrib.summary`)
+ to facilitate the use of new summary writing with pre-existing code that
+ expects a `FileWriter` instance.
"""
def __init__(self,
@@ -294,10 +300,11 @@ class FileWriter(SummaryToEventTransformer):
max_queue=10,
flush_secs=120,
graph_def=None,
- filename_suffix=None):
- """Creates a `FileWriter` and an event file.
+ filename_suffix=None,
+ session=None):
+ """Creates a `FileWriter`, optionally shared within the given session.
- On construction the summary writer creates a new event file in `logdir`.
+ Typically, constructing a file writer creates a new event file in `logdir`.
This event file will contain `Event` protocol buffers constructed when you
call one of the following functions: `add_summary()`, `add_session_log()`,
`add_event()`, or `add_graph()`.
@@ -317,13 +324,16 @@ class FileWriter(SummaryToEventTransformer):
writer = tf.summary.FileWriter(<some-directory>, sess.graph)
```
- The other arguments to the constructor control the asynchronous writes to
- the event file:
-
- * `flush_secs`: How often, in seconds, to flush the added summaries
- and events to disk.
- * `max_queue`: Maximum number of summaries or events pending to be
- written to disk before one of the 'add' calls block.
+ The `session` argument to the constructor makes the returned `FileWriter`
+ a compatibility layer over new graph-based summaries (`tf.contrib.summary`).
+ Crucially, this means the underlying writer resource and events file will
+ be shared with any other `FileWriter` using the same `session` and `logdir`,
+ and with any `tf.contrib.summary.SummaryWriter` in this session using
+ the same shared resource name (which by default is scoped to the logdir). If
+ no such resource exists, one will be created using the remaining arguments
+ to this constructor, but if one already exists those arguments are ignored.
+ In either case, ops will be added to `session.graph` to control the
+ underlying file writer resource. See `tf.contrib.summary` for more details.
Args:
logdir: A string. Directory where event file will be written.
@@ -334,6 +344,7 @@ class FileWriter(SummaryToEventTransformer):
graph_def: DEPRECATED: Use the `graph` argument instead.
filename_suffix: A string. Every event file's name is suffixed with
`suffix`.
+ session: A `tf.Session` object. See details above.
Raises:
RuntimeError: If called with eager execution enabled.
@@ -347,9 +358,12 @@ class FileWriter(SummaryToEventTransformer):
raise RuntimeError(
"tf.summary.FileWriter is not compatible with eager execution. "
"Use tf.contrib.summary instead.")
-
- event_writer = EventFileWriter(logdir, max_queue, flush_secs,
- filename_suffix)
+ if session is not None:
+ event_writer = EventFileWriterV2(
+ session, logdir, max_queue, flush_secs, filename_suffix)
+ else:
+ event_writer = EventFileWriter(logdir, max_queue, flush_secs,
+ filename_suffix)
super(FileWriter, self).__init__(event_writer, graph, graph_def)
def __enter__(self):
diff --git a/tensorflow/python/summary/writer/writer_test.py b/tensorflow/python/summary/writer/writer_test.py
index 88ade0aac3..dc990c2602 100644
--- a/tensorflow/python/summary/writer/writer_test.py
+++ b/tensorflow/python/summary/writer/writer_test.py
@@ -29,10 +29,12 @@ from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.util import event_pb2
from tensorflow.core.util.event_pb2 import SessionLog
+from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
+from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary import plugin_asset
@@ -42,7 +44,10 @@ from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.util import compat
-class SummaryWriterTestCase(test.TestCase):
+class FileWriterTestCase(test.TestCase):
+
+ def _FileWriter(self, *args, **kwargs):
+ return writer.FileWriter(*args, **kwargs)
def _TestDir(self, test_name):
test_dir = os.path.join(self.get_temp_dir(), test_name)
@@ -96,7 +101,7 @@ class SummaryWriterTestCase(test.TestCase):
def testAddingSummaryGraphAndRunMetadata(self):
test_dir = self._CleanTestDir("basics")
- sw = writer.FileWriter(test_dir)
+ sw = self._FileWriter(test_dir)
sw.add_session_log(event_pb2.SessionLog(status=SessionLog.START), 1)
sw.add_summary(
@@ -171,7 +176,7 @@ class SummaryWriterTestCase(test.TestCase):
test_dir = self._CleanTestDir("basics_named_graph")
with ops.Graph().as_default() as g:
constant_op.constant([12], name="douze")
- sw = writer.FileWriter(test_dir, graph=g)
+ sw = self._FileWriter(test_dir, graph=g)
sw.close()
self._assertEventsWithGraph(test_dir, g, True)
@@ -179,7 +184,7 @@ class SummaryWriterTestCase(test.TestCase):
test_dir = self._CleanTestDir("basics_positional_graph")
with ops.Graph().as_default() as g:
constant_op.constant([12], name="douze")
- sw = writer.FileWriter(test_dir, g)
+ sw = self._FileWriter(test_dir, g)
sw.close()
self._assertEventsWithGraph(test_dir, g, True)
@@ -188,7 +193,7 @@ class SummaryWriterTestCase(test.TestCase):
with ops.Graph().as_default() as g:
constant_op.constant([12], name="douze")
gd = g.as_graph_def()
- sw = writer.FileWriter(test_dir, graph_def=gd)
+ sw = self._FileWriter(test_dir, graph_def=gd)
sw.close()
self._assertEventsWithGraph(test_dir, g, False)
@@ -197,7 +202,7 @@ class SummaryWriterTestCase(test.TestCase):
with ops.Graph().as_default() as g:
constant_op.constant([12], name="douze")
gd = g.as_graph_def()
- sw = writer.FileWriter(test_dir, gd)
+ sw = self._FileWriter(test_dir, gd)
sw.close()
self._assertEventsWithGraph(test_dir, g, False)
@@ -207,18 +212,18 @@ class SummaryWriterTestCase(test.TestCase):
with ops.Graph().as_default() as g:
constant_op.constant([12], name="douze")
gd = g.as_graph_def()
- sw = writer.FileWriter(test_dir, graph=g, graph_def=gd)
+ sw = self._FileWriter(test_dir, graph=g, graph_def=gd)
sw.close()
def testNeitherGraphNorGraphDef(self):
with self.assertRaises(TypeError):
test_dir = self._CleanTestDir("basics_string_instead_of_graph")
- sw = writer.FileWriter(test_dir, "string instead of graph object")
+ sw = self._FileWriter(test_dir, "string instead of graph object")
sw.close()
def testCloseAndReopen(self):
test_dir = self._CleanTestDir("close_and_reopen")
- sw = writer.FileWriter(test_dir)
+ sw = self._FileWriter(test_dir)
sw.add_session_log(event_pb2.SessionLog(status=SessionLog.START), 1)
sw.close()
# Sleep at least one second to make sure we get a new event file name.
@@ -261,7 +266,7 @@ class SummaryWriterTestCase(test.TestCase):
def testNonBlockingClose(self):
test_dir = self._CleanTestDir("non_blocking_close")
- sw = writer.FileWriter(test_dir)
+ sw = self._FileWriter(test_dir)
# Sleep 1.2 seconds to make sure event queue is empty.
time.sleep(1.2)
time_before_close = time.time()
@@ -270,7 +275,7 @@ class SummaryWriterTestCase(test.TestCase):
def testWithStatement(self):
test_dir = self._CleanTestDir("with_statement")
- with writer.FileWriter(test_dir) as sw:
+ with self._FileWriter(test_dir) as sw:
sw.add_session_log(event_pb2.SessionLog(status=SessionLog.START), 1)
event_paths = sorted(glob.glob(os.path.join(test_dir, "event*")))
self.assertEquals(1, len(event_paths))
@@ -280,7 +285,7 @@ class SummaryWriterTestCase(test.TestCase):
# protocol buffers correctly.
def testAddingSummariesFromSessionRunCalls(self):
test_dir = self._CleanTestDir("global_step")
- sw = writer.FileWriter(test_dir)
+ sw = self._FileWriter(test_dir)
with self.test_session():
i = constant_op.constant(1, dtype=dtypes.int32, shape=[])
l = constant_op.constant(2, dtype=dtypes.int64, shape=[])
@@ -327,7 +332,7 @@ class SummaryWriterTestCase(test.TestCase):
def testPluginMetadataStrippedFromSubsequentEvents(self):
test_dir = self._CleanTestDir("basics")
- sw = writer.FileWriter(test_dir)
+ sw = self._FileWriter(test_dir)
sw.add_session_log(event_pb2.SessionLog(status=SessionLog.START), 1)
@@ -386,7 +391,7 @@ class SummaryWriterTestCase(test.TestCase):
def testFileWriterWithSuffix(self):
test_dir = self._CleanTestDir("test_suffix")
- sw = writer.FileWriter(test_dir, filename_suffix="_test_suffix")
+ sw = self._FileWriter(test_dir, filename_suffix="_test_suffix")
for _ in range(10):
sw.add_summary(
summary_pb2.Summary(value=[
@@ -400,9 +405,178 @@ class SummaryWriterTestCase(test.TestCase):
for filename in event_filenames:
self.assertTrue(filename.endswith("_test_suffix"))
+ def testPluginAssetSerialized(self):
+ class ExamplePluginAsset(plugin_asset.PluginAsset):
+ plugin_name = "example"
+
+ def assets(self):
+ return {"foo.txt": "foo!", "bar.txt": "bar!"}
+
+ with ops.Graph().as_default() as g:
+ plugin_asset.get_plugin_asset(ExamplePluginAsset)
+
+ logdir = self.get_temp_dir()
+ fw = self._FileWriter(logdir)
+ fw.add_graph(g)
+ plugin_dir = os.path.join(logdir, writer._PLUGINS_DIR, "example")
+
+ with gfile.Open(os.path.join(plugin_dir, "foo.txt"), "r") as f:
+ content = f.read()
+ self.assertEqual(content, "foo!")
+
+ with gfile.Open(os.path.join(plugin_dir, "bar.txt"), "r") as f:
+ content = f.read()
+ self.assertEqual(content, "bar!")
-class SummaryWriterCacheTest(test.TestCase):
- """SummaryWriterCache tests."""
+
+class SessionBasedFileWriterTestCase(FileWriterTestCase):
+ """Tests for FileWriter behavior when passed a Session argument."""
+
+ def _FileWriter(self, *args, **kwargs):
+ if "session" not in kwargs:
+ # Pass in test_session() as the session. It will be cached during this
+ # test method invocation so that any other use of test_session() with no
+ # graph should result in re-using the same underlying Session.
+ with self.test_session() as sess:
+ kwargs["session"] = sess
+ return writer.FileWriter(*args, **kwargs)
+ return writer.FileWriter(*args, **kwargs)
+
+ def _createTaggedSummary(self, tag):
+ summary = summary_pb2.Summary()
+ summary.value.add(tag=tag)
+ return summary
+
+ def testSharing_withOtherSessionBasedFileWriters(self):
+ logdir = self.get_temp_dir()
+ with session.Session() as sess:
+ # Initial file writer
+ writer1 = writer.FileWriter(session=sess, logdir=logdir)
+ writer1.add_summary(self._createTaggedSummary("one"), 1)
+ writer1.flush()
+
+ # File writer, should share file with writer1
+ writer2 = writer.FileWriter(session=sess, logdir=logdir)
+ writer2.add_summary(self._createTaggedSummary("two"), 2)
+ writer2.flush()
+
+ # File writer with different logdir (shouldn't be in this logdir at all)
+ writer3 = writer.FileWriter(session=sess, logdir=logdir + "-other")
+ writer3.add_summary(self._createTaggedSummary("three"), 3)
+ writer3.flush()
+
+ # File writer in a different session (should be in separate file)
+ time.sleep(1.1) # Ensure filename has a different timestamp
+ with session.Session() as other_sess:
+ writer4 = writer.FileWriter(session=other_sess, logdir=logdir)
+ writer4.add_summary(self._createTaggedSummary("four"), 4)
+ writer4.flush()
+
+ # One more file writer, should share file with writer1
+ writer5 = writer.FileWriter(session=sess, logdir=logdir)
+ writer5.add_summary(self._createTaggedSummary("five"), 5)
+ writer5.flush()
+
+ event_paths = iter(sorted(glob.glob(os.path.join(logdir, "event*"))))
+
+ # First file should have tags "one", "two", and "five"
+ events = summary_iterator.summary_iterator(next(event_paths))
+ self.assertEqual("brain.Event:2", next(events).file_version)
+ self.assertEqual("one", next(events).summary.value[0].tag)
+ self.assertEqual("two", next(events).summary.value[0].tag)
+ self.assertEqual("five", next(events).summary.value[0].tag)
+ self.assertRaises(StopIteration, lambda: next(events))
+
+ # Second file should have just "four"
+ events = summary_iterator.summary_iterator(next(event_paths))
+ self.assertEqual("brain.Event:2", next(events).file_version)
+ self.assertEqual("four", next(events).summary.value[0].tag)
+ self.assertRaises(StopIteration, lambda: next(events))
+
+ # No more files
+ self.assertRaises(StopIteration, lambda: next(event_paths))
+
+ # Just check that the other logdir file exists to be sure we wrote it
+ self.assertTrue(glob.glob(os.path.join(logdir + "-other", "event*")))
+
+ def testSharing_withExplicitSummaryFileWriters(self):
+ logdir = self.get_temp_dir()
+ with session.Session() as sess:
+ # Initial file writer via FileWriter(session=?)
+ writer1 = writer.FileWriter(session=sess, logdir=logdir)
+ writer1.add_summary(self._createTaggedSummary("one"), 1)
+ writer1.flush()
+
+ # Next one via create_file_writer(), should use same file
+ writer2 = summary_ops_v2.create_file_writer(logdir=logdir)
+ with summary_ops_v2.always_record_summaries(), writer2.as_default():
+ summary2 = summary_ops_v2.scalar("two", 2.0, step=2)
+ sess.run(writer2.init())
+ sess.run(summary2)
+ sess.run(writer2.flush())
+
+ # Next has different shared name, should be in separate file
+ time.sleep(1.1) # Ensure filename has a different timestamp
+ writer3 = summary_ops_v2.create_file_writer(logdir=logdir, name="other")
+ with summary_ops_v2.always_record_summaries(), writer3.as_default():
+ summary3 = summary_ops_v2.scalar("three", 3.0, step=3)
+ sess.run(writer3.init())
+ sess.run(summary3)
+ sess.run(writer3.flush())
+
+ # Next uses a second session, should be in separate file
+ time.sleep(1.1) # Ensure filename has a different timestamp
+ with session.Session() as other_sess:
+ writer4 = summary_ops_v2.create_file_writer(logdir=logdir)
+ with summary_ops_v2.always_record_summaries(), writer4.as_default():
+ summary4 = summary_ops_v2.scalar("four", 4.0, step=4)
+ other_sess.run(writer4.init())
+ other_sess.run(summary4)
+ other_sess.run(writer4.flush())
+
+ # Next via FileWriter(session=?) uses same second session, should be in
+ # same separate file. (This checks sharing in the other direction)
+ writer5 = writer.FileWriter(session=other_sess, logdir=logdir)
+ writer5.add_summary(self._createTaggedSummary("five"), 5)
+ writer5.flush()
+
+ # One more via create_file_writer(), should use same file
+ writer6 = summary_ops_v2.create_file_writer(logdir=logdir)
+ with summary_ops_v2.always_record_summaries(), writer6.as_default():
+ summary6 = summary_ops_v2.scalar("six", 6.0, step=6)
+ sess.run(writer6.init())
+ sess.run(summary6)
+ sess.run(writer6.flush())
+
+ event_paths = iter(sorted(glob.glob(os.path.join(logdir, "event*"))))
+
+ # First file should have tags "one", "two", and "six"
+ events = summary_iterator.summary_iterator(next(event_paths))
+ self.assertEqual("brain.Event:2", next(events).file_version)
+ self.assertEqual("one", next(events).summary.value[0].tag)
+ self.assertEqual("two", next(events).summary.value[0].tag)
+ self.assertEqual("six", next(events).summary.value[0].tag)
+ self.assertRaises(StopIteration, lambda: next(events))
+
+ # Second file should have just "three"
+ events = summary_iterator.summary_iterator(next(event_paths))
+ self.assertEqual("brain.Event:2", next(events).file_version)
+ self.assertEqual("three", next(events).summary.value[0].tag)
+ self.assertRaises(StopIteration, lambda: next(events))
+
+ # Third file should have "four" and "five"
+ events = summary_iterator.summary_iterator(next(event_paths))
+ self.assertEqual("brain.Event:2", next(events).file_version)
+ self.assertEqual("four", next(events).summary.value[0].tag)
+ self.assertEqual("five", next(events).summary.value[0].tag)
+ self.assertRaises(StopIteration, lambda: next(events))
+
+ # No more files
+ self.assertRaises(StopIteration, lambda: next(event_paths))
+
+
+class FileWriterCacheTest(test.TestCase):
+ """FileWriterCache tests."""
def _test_dir(self, test_name):
"""Create an empty dir to use for tests.
@@ -448,32 +622,5 @@ class SummaryWriterCacheTest(test.TestCase):
self.assertFalse(sw1 == sw2)
-class ExamplePluginAsset(plugin_asset.PluginAsset):
- plugin_name = "example"
-
- def assets(self):
- return {"foo.txt": "foo!", "bar.txt": "bar!"}
-
-
-class PluginAssetsTest(test.TestCase):
-
- def testPluginAssetSerialized(self):
- with ops.Graph().as_default() as g:
- plugin_asset.get_plugin_asset(ExamplePluginAsset)
-
- logdir = self.get_temp_dir()
- fw = writer.FileWriter(logdir)
- fw.add_graph(g)
- plugin_dir = os.path.join(logdir, writer._PLUGINS_DIR, "example")
-
- with gfile.Open(os.path.join(plugin_dir, "foo.txt"), "r") as f:
- content = f.read()
- self.assertEqual(content, "foo!")
-
- with gfile.Open(os.path.join(plugin_dir, "bar.txt"), "r") as f:
- content = f.read()
- self.assertEqual(content, "bar!")
-
-
if __name__ == "__main__":
test.main()