Commit 66385384 authored by Dan Mané, committed by TensorFlower Gardener

TensorFlow summary API migration.

Create tf.summary.{image, histogram, audio, merge, merge_all}

These duplicate tf.{image, histogram, audio, merge}_summary and tf.merge_all_summaries.
We will deprecate the existing versions in a future change.

The APIs for the migrated summary ops are almost the same. Here are the changes:
- The first argument for each summary op is now "name" rather than "tag". The "tag" argument is removed entirely. The name will be used to construct the op name per standard TensorFlow naming conventions, meaning there is no longer a need to do manual name-prefixing to get re-usable summary ops.
- The image summary "max_images" argument was renamed to "max_outputs", for consistency with the audio summary and other future summaries.

A future change will migrate existing usage where possible, and remove the duplicated implementations in logging_ops.py.
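
For illustration only (not part of this change), a before/after sketch of one migrated call; `images` stands in for any 4-D image tensor:

    # Before: the first positional argument is a tag; output count capped via max_images.
    summ = tf.image_summary("my_images", images, max_images=3)
    # After: the first positional argument is a name, scoped like any other op name;
    # the cap is now max_outputs.
    summ = tf.summary.image("my_images", images, max_outputs=3)
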
Change: 136636253
Parent 7c7d499d
......@@ -44,19 +44,17 @@ class SummaryAudioOpTest(tf.test.TestCase):
def testAudioSummary(self):
np.random.seed(7)
with self.test_session() as sess:
num_frames = 7
for channels in 1, 2, 5, 8:
for channels in (1, 2, 5, 8):
with self.test_session(graph=tf.Graph()) as sess:
num_frames = 7
shape = (4, num_frames, channels)
# Generate random audio in the range [-1.0, 1.0).
const = 2.0 * np.random.random(shape) - 1.0
# Summarize
sample_rate = 8000
summ = tf.audio_summary("snd",
const,
max_outputs=3,
sample_rate=sample_rate)
summ = tf.summary.audio(
"snd", const, max_outputs=3, sample_rate=sample_rate)
value = sess.run(summ)
self.assertEqual([], summ.get_shape())
audio_summ = self._AsSummary(value)
......
......@@ -46,11 +46,11 @@ class SummaryImageOpTest(tf.test.TestCase):
def testImageSummary(self):
np.random.seed(7)
with self.test_session() as sess:
for depth in 1, 3, 4:
shape = (4, 5, 7) + (depth,)
bad_color = [255, 0, 0, 255][:depth]
for positive in False, True:
for depth in (1, 3, 4):
for positive in False, True:
with self.test_session(graph=tf.Graph()) as sess:
shape = (4, 5, 7) + (depth,)
bad_color = [255, 0, 0, 255][:depth]
# Build a mostly random image with one nan
const = np.random.randn(*shape).astype(np.float32)
const[0, 1, 2] = 0 # Make the nan entry not the max
......@@ -65,7 +65,7 @@ class SummaryImageOpTest(tf.test.TestCase):
const[0, 1, 2, depth // 2] = np.nan
# Summarize
summ = tf.image_summary("img", const)
summ = tf.summary.image("img", const)
value = sess.run(summ)
self.assertEqual([], summ.get_shape())
image_summ = self._AsSummary(value)
......@@ -82,8 +82,8 @@ class SummaryImageOpTest(tf.test.TestCase):
def testImageSummaryUint8(self):
np.random.seed(7)
with self.test_session() as sess:
for depth in 1, 3, 4:
for depth in (1, 3, 4):
with self.test_session(graph=tf.Graph()) as sess:
shape = (4, 5, 7) + (depth,)
# Build a random uint8 image
......@@ -92,7 +92,7 @@ class SummaryImageOpTest(tf.test.TestCase):
self.assertEqual(tf_images.dtype, tf.uint8)
# Summarize
summ = tf.image_summary("img", tf_images)
summ = tf.summary.image("img", tf_images)
value = sess.run(summ)
self.assertEqual([], summ.get_shape())
image_summ = self._AsSummary(value)
......
......@@ -52,9 +52,9 @@ class SummaryOpsTest(tf.test.TestCase):
def testMergeSummary(self):
with self.test_session() as sess:
const = tf.constant(10.0)
summ1 = tf.histogram_summary("h", const, name="histo")
summ2 = tf.scalar_summary("c", const, name="summ")
merge = tf.merge_summary([summ1, summ2])
summ1 = tf.summary.histogram("h", const)
summ2 = tf.scalar_summary("c", const)
merge = tf.summary.merge([summ1, summ2])
value = sess.run(merge)
self.assertEqual([], merge.get_shape())
self.assertProtoEquals("""
......@@ -80,11 +80,10 @@ class SummaryOpsTest(tf.test.TestCase):
def testMergeAllSummaries(self):
with tf.Graph().as_default():
const = tf.constant(10.0)
summ1 = tf.histogram_summary("h", const, name="histo")
summ2 = tf.scalar_summary("o", const, name="oops",
collections=["foo_key"])
summ3 = tf.scalar_summary("c", const, name="summ")
merge = tf.merge_all_summaries()
summ1 = tf.summary.histogram("h", const)
summ2 = tf.summary.scalar("o", const, collections=["foo_key"])
summ3 = tf.summary.scalar("c", const)
merge = tf.summary.merge_all()
self.assertEqual("MergeSummary", merge.op.type)
self.assertEqual(2, len(merge.op.inputs))
self.assertEqual(summ1, merge.op.inputs[0])
......@@ -100,7 +99,7 @@ class SummaryOpsTest(tf.test.TestCase):
for dtype in (tf.int8, tf.uint8, tf.int16, tf.int32,
tf.float32, tf.float64):
const = tf.constant(10, dtype=dtype)
tf.histogram_summary("h", const, name="histo")
tf.summary.histogram("h", const)
if __name__ == "__main__":
......
......@@ -663,6 +663,47 @@ class MockingEventAccumulatorTest(EventAccumulatorTest):
self.assertEqual(accumulator.Scalars('scalar1'), seq1)
self.assertEqual(accumulator.Scalars('scalar2'), seq2)
def testTFSummaryImage(self):
"""Verify processing of tf.summary.image."""
event_sink = _EventGenerator(zero_out_timestamps=True)
writer = SummaryToEventTransformer(event_sink)
with self.test_session() as sess:
ipt = tf.ones([10, 4, 4, 3], tf.uint8)
# This is an interesting example, because the old tf.image_summary op
# would throw an error here, since reusing the same tag was not allowed.
# Using the tf node name instead allows the same name argument to be
# reused for the image summary under different name scopes.
with tf.name_scope('1'):
tf.summary.image('images', ipt, max_outputs=1)
with tf.name_scope('2'):
tf.summary.image('images', ipt, max_outputs=2)
with tf.name_scope('3'):
tf.summary.image('images', ipt, max_outputs=3)
merged = tf.merge_all_summaries()
writer.add_graph(sess.graph)
for i in xrange(10):
summ = sess.run(merged)
writer.add_summary(summ, global_step=i)
accumulator = ea.EventAccumulator(event_sink)
accumulator.Reload()
tags = [
u'1/images/image', u'2/images/image/0', u'2/images/image/1',
u'3/images/image/0', u'3/images/image/1', u'3/images/image/2'
]
self.assertTagsEqual(accumulator.Tags(), {
ea.IMAGES: tags,
ea.AUDIO: [],
ea.SCALARS: [],
ea.HISTOGRAMS: [],
ea.COMPRESSED_HISTOGRAMS: [],
ea.GRAPH: True,
ea.META_GRAPH: False,
ea.RUN_METADATA: []
})
class RealisticEventAccumulatorTest(EventAccumulatorTest):
......
......@@ -18,6 +18,11 @@
### Summary Ops
@@tensor_summary
@@scalar
@@histogram
@@audio
@@image
@@merge
@@merge_all
## Utilities
@@get_summary_description
......@@ -30,18 +35,24 @@ from __future__ import print_function
import six
from google.protobuf import json_format as _json_format
from tensorflow.core.framework import summary_pb2 as _summary_pb2
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import tensor_shape as _tensor_shape
from tensorflow.python.framework.dtypes import as_dtype as _as_dtype
# Exports tensor_summary:
from tensorflow.python.ops import gen_logging_ops as _gen_logging_ops
# exports tensor_summary
from tensorflow.python.ops.summary_ops import tensor_summary
from tensorflow.python.util.all_util import remove_undocumented
from tensorflow.python.util import compat as _compat
SCALAR_SUMMARY_LABEL = "tf_summary_type:scalar"
def _collect(val, collections, default_collections):
if collections is None:
collections = default_collections
for key in collections:
_ops.add_to_collection(key, val)
def scalar(name, tensor, summary_description=None, collections=None):
......@@ -73,11 +84,193 @@ def scalar(name, tensor, summary_description=None, collections=None):
if summary_description is None:
summary_description = _summary_pb2.SummaryDescription()
summary_description.type_hint = "scalar"
summary_description.type_hint = 'scalar'
return tensor_summary(name, tensor, summary_description, collections)
def image(name, tensor, max_outputs=3, collections=None):
"""Outputs a `Summary` protocol buffer with images.
The summary has up to `max_outputs` summary values containing images. The
images are built from `tensor` which must be 4-D with shape `[batch_size,
height, width, channels]` and where `channels` can be:
* 1: `tensor` is interpreted as Grayscale.
* 3: `tensor` is interpreted as RGB.
* 4: `tensor` is interpreted as RGBA.
The images have the same number of channels as the input tensor. For float
input, the values are normalized one image at a time to fit in the range
`[0, 255]`. `uint8` values are unchanged. The op uses two different
normalization algorithms:
* If the input values are all positive, they are rescaled so the largest one
is 255.
* If any input value is negative, the values are shifted so input value 0.0
is at 127. They are then rescaled so that either the smallest value is 0,
or the largest one is 255.
The `tag` in the outputted Summary.Value protobufs is generated based on the
name, with a suffix depending on the max_outputs setting:
* If `max_outputs` is 1, the summary value tag is '*name*/image'.
* If `max_outputs` is greater than 1, the summary value tags are
generated sequentially as '*name*/image/0', '*name*/image/1', etc.
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
tensor: A 4-D `uint8` or `float32` `Tensor` of shape `[batch_size, height,
width, channels]` where `channels` is 1, 3, or 4.
max_outputs: Max number of batch elements to generate images for.
collections: Optional list of ops.GraphKeys. The collections to add the
summary to. Defaults to [_ops.GraphKeys.SUMMARIES]
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
with _ops.name_scope(name, None, [tensor]) as scope:
# pylint: disable=protected-access
val = _gen_logging_ops._image_summary(
tag=scope.rstrip('/'),
tensor=tensor,
max_images=max_outputs,
name=scope)
_collect(val, collections, [_ops.GraphKeys.SUMMARIES])
return val
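# Usage sketch (illustrative comment, not part of this change): under a name
# scope, the emitted Summary.Value tags follow '<scope>/<name>/image/<i>'.
#   batch = tf.ones([8, 32, 32, 3])  # any 4-D uint8/float32 tensor
#   with tf.name_scope('eval'):
#     summ = tf.summary.image('examples', batch, max_outputs=2)
#   # sess.run(summ) serializes values tagged 'eval/examples/image/0' and
#   # 'eval/examples/image/1'.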
def histogram(name, values, collections=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with a histogram.
The generated
[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
has one summary value containing a histogram for `values`.
This op reports an `InvalidArgument` error if any value is not finite.
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
values: A real numeric `Tensor`. Any shape. Values to use to
build the histogram.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
# pylint: enable=line-too-long
with _ops.name_scope(name, 'HistogramSummary', [values]) as scope:
# pylint: disable=protected-access
val = _gen_logging_ops._histogram_summary(
tag=scope.rstrip('/'), values=values, name=scope)
_collect(val, collections, [_ops.GraphKeys.SUMMARIES])
return val
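# Usage sketch (illustrative comment, not part of this change): because the tag
# is derived from the full op name, the same 'name' argument can be reused
# under different name scopes.
#   activations = tf.random_normal([1000])  # any real numeric tensor
#   with tf.name_scope('layer1'):
#     tf.summary.histogram('activations', activations)  # tag 'layer1/activations'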
def audio(name, tensor, sample_rate, max_outputs=3, collections=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with audio.
The summary has up to `max_outputs` summary values containing audio. The
audio is built from `tensor` which must be 3-D with shape `[batch_size,
frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
assumed to be in the range of `[-1.0, 1.0]` with a sample rate of
`sample_rate`.
The `tag` in the outputted Summary.Value protobufs is generated based on the
name, with a suffix depending on the max_outputs setting:
* If `max_outputs` is 1, the summary value tag is '*name*/audio'.
* If `max_outputs` is greater than 1, the summary value tags are
generated sequentially as '*name*/audio/0', '*name*/audio/1', etc.
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
tensor: A 3-D `float32` `Tensor` of shape `[batch_size, frames, channels]`
or a 2-D `float32` `Tensor` of shape `[batch_size, frames]`.
sample_rate: A Scalar `float32` `Tensor` indicating the sample rate of the
signal in hertz.
max_outputs: Max number of batch elements to generate audio for.
collections: Optional list of ops.GraphKeys. The collections to add the
summary to. Defaults to [_ops.GraphKeys.SUMMARIES]
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
# pylint: enable=line-too-long
with _ops.name_scope(name, None, [tensor]) as scope:
# pylint: disable=protected-access
sample_rate = _ops.convert_to_tensor(
sample_rate, dtype=_dtypes.float32, name='sample_rate')
val = _gen_logging_ops._audio_summary_v2(
tag=scope.rstrip('/'),
tensor=tensor,
max_outputs=max_outputs,
sample_rate=sample_rate,
name=scope)
_collect(val, collections, [_ops.GraphKeys.SUMMARIES])
return val
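# Usage sketch (illustrative comment, not part of this change): audio tags
# follow the same convention, with an '/audio' suffix.
#   waveform = tf.zeros([2, 16000, 1])  # [batch_size, frames, channels]
#   summ = tf.summary.audio('speech', waveform, sample_rate=16000, max_outputs=2)
#   # Tags: 'speech/audio/0' and 'speech/audio/1'.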
def merge(inputs, collections=None, name=None):
# pylint: disable=line-too-long
"""Merges summaries.
This op creates a
[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
protocol buffer that contains the union of all the values in the input
summaries.
When the Op is run, it reports an `InvalidArgument` error if multiple values
in the summaries to merge use the same tag.
Args:
inputs: A list of `string` `Tensor` objects containing serialized `Summary`
protocol buffers.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
name: A name for the operation (optional).
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer resulting from the merging.
"""
# pylint: enable=line-too-long
with _ops.name_scope(name, 'Merge', inputs):
# pylint: disable=protected-access
val = _gen_logging_ops._merge_summary(inputs=inputs, name=name)
_collect(val, collections, [])
return val
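# Usage sketch (illustrative comment, not part of this change): merge combines
# explicitly listed summary tensors into one serialized Summary proto.
#   s1 = tf.summary.scalar('loss', loss)           # 'loss' is a placeholder tensor
#   s2 = tf.summary.histogram('weights', weights)  # 'weights' is a placeholder tensor
#   merged = tf.summary.merge([s1, s2])
#   # sess.run(merged) returns a single serialized Summary string.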
def merge_all(key=_ops.GraphKeys.SUMMARIES):
"""Merges all summaries collected in the default graph.
Args:
key: `GraphKey` used to collect the summaries. Defaults to
`GraphKeys.SUMMARIES`.
Returns:
If no summaries were collected, returns None. Otherwise returns a scalar
`Tensor` of type `string` containing the serialized `Summary` protocol
buffer resulting from the merging.
"""
summary_ops = _ops.get_collection(key)
if not summary_ops:
return None
else:
return merge(summary_ops)
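# Usage sketch (illustrative comment, not part of this change): merge_all
# gathers every summary op added to the given collection (GraphKeys.SUMMARIES
# by default) and returns None when the collection is empty.
#   tf.summary.scalar('loss', loss)  # added to SUMMARIES by default
#   merged = tf.summary.merge_all()
#   if merged is not None:
#     summary_str = sess.run(merged)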
def get_summary_description(node_def):
"""Given a TensorSummary node_def, retrieve its SummaryDescription.
......@@ -94,17 +287,12 @@ def get_summary_description(node_def):
ValueError: if the node is not a summary op.
"""
if node_def.op != "TensorSummary":
raise ValueError("Cannot get_summary_description on %s" % node_def.op)
description_str = _compat.as_str_any(node_def.attr["description"].s)
if node_def.op != 'TensorSummary':
raise ValueError("Can't get_summary_description on %s" % node_def.op)
description_str = _compat.as_str_any(node_def.attr['description'].s)
summary_description = _summary_pb2.SummaryDescription()
_json_format.Parse(description_str, summary_description)
return summary_description
_allowed_symbols = [
"SCALAR_SUMMARY_LABEL"
]
remove_undocumented(__name__, _allowed_symbols)
remove_undocumented(__name__, [])
......@@ -17,6 +17,7 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from google.protobuf import json_format
......@@ -77,6 +78,31 @@ class ScalarSummaryTest(tf.test.TestCase):
json_format.Parse(description, summary_description)
self.assertEqual(summary_description.type_hint, 'scalar')
def testImageSummary(self):
with self.test_session() as s:
i = tf.ones((5, 4, 4, 3))
with tf.name_scope('outer'):
im = tf.summary.image('inner', i, max_outputs=3)
summary_str = s.run(im)
summary = tf.Summary()
summary.ParseFromString(summary_str)
values = summary.value
self.assertEqual(len(values), 3)
tags = sorted(v.tag for v in values)
expected = sorted('outer/inner/image/{}'.format(i) for i in xrange(3))
self.assertEqual(tags, expected)
def testHistogramSummary(self):
with self.test_session() as s:
i = tf.ones((5, 4, 4, 3))
with tf.name_scope('outer'):
summ_op = tf.summary.histogram('inner', i)
summary_str = s.run(summ_op)
summary = tf.Summary()
summary.ParseFromString(summary_str)
self.assertEqual(len(summary.value), 1)
self.assertEqual(summary.value[0].tag, 'outer/inner')
if __name__ == '__main__':
tf.test.main()