10 changes: 10 additions & 0 deletions tensorboard/plugins/graph/BUILD
@@ -93,6 +93,16 @@ py_test(
    ],
)

py_binary(
    name = "graphs_demo",
    srcs = ["graphs_demo.py"],
    srcs_version = "PY3",
    deps = [
        "//tensorboard:expect_numpy_installed",
        "//tensorboard:expect_tensorflow_installed",
    ],
)

py_library(
name = "keras_util",
srcs = ["keras_util.py"],
133 changes: 133 additions & 0 deletions tensorboard/plugins/graph/graphs_demo.py
@@ -0,0 +1,133 @@
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sample data for the graph plugin.

Most demos emit basic run graph data, but the graph plugin also shows
more specialized data types. See function docstrings for details about
what runs have what data.
"""

import os

import numpy as np
import tensorflow as tf

LOGDIR = "/tmp/graphs_demo"


def main():
    tagged()
    profile()
    keras()


def tagged():
"""Create run graph data with `TaggedRunMetadata`.

The `tagged` run has a top-level run graph as well as steps
`step_0000` through `step_0002`, each with profile data.
"""
logdir = os.path.join(LOGDIR, "tagged")
with tf.compat.v1.Graph().as_default():
with tf.compat.v1.Session() as sess:
step_tensor = tf.compat.v1.placeholder(shape=(), dtype=tf.int32)
output = step_tensor * 2

writer = tf.compat.v1.summary.FileWriter(logdir)
with writer:
writer.add_graph(sess.graph)
for step in range(3):
feed_dict = {step_tensor: step}
run_options = tf.compat.v1.RunOptions()
run_options.trace_level = tf.compat.v1.RunOptions.FULL_TRACE
run_metadata = tf.compat.v1.RunMetadata()
s = sess.run(
output,
feed_dict=feed_dict,
options=run_options,
run_metadata=run_metadata,
)
writer.add_run_metadata(run_metadata, "step_%04d" % step)


def keras():
"""Create a Keras conceptual graph and op graphs.

The `keras/train` run has a run-level graph, a `batch_2` tag with op
graph only (`graph_run_metadata_graph` plugin), and a `keras` tag
with a Keras conceptual graph only (`graph_keras_model` plugin).
"""
logdir = os.path.join(LOGDIR, "keras")

data_size = 1000
train_fac = 0.8
train_size = int(data_size * train_fac)
x = np.linspace(-1, 1, data_size)
np.random.shuffle(x)
y = 0.5 * x + 2 + np.random.normal(0, 0.05, (data_size,))
(x_train, y_train) = x[:train_size], y[:train_size]
(x_test, y_test) = x[train_size:], y[train_size:]

layers = [
tf.keras.layers.Dense(16, input_dim=1),
tf.keras.layers.Dense(1),
]
model = tf.keras.models.Sequential(layers)
model.compile(
loss=tf.keras.losses.mean_squared_error,
optimizer=tf.keras.optimizers.SGD(lr=0.2),
)
model.fit(
x_train,
y_train,
batch_size=train_size,
verbose=0,
epochs=100,
validation_data=(x_test, y_test),
callbacks=[tf.keras.callbacks.TensorBoard(logdir)],
)


def profile():
"""Create data with op graphs and profile data.
Contributor
I find it a tiny bit odd that graphs_demo is creating a profile, which is a separate data structure used by a different plugin. If you expect to see profile information in RunMetadata with profiler=True, I believe the new profiler is separate from RunMetadata and will be useless (it creates profile trace files but does not populate the RunMetadata like L54). Am I missing something?

Contributor Author
This is the only way that I could find to generate data that lives under
the graph_run_metadata plugin. It's written in an unexported function,
summary_ops_v2.run_metadata, which looks to only be called in this
code path.

Is there a different way that I can test this functionality?

Contributor
I see. For the trace_on(graph=True, profiler=False) case, you are relying on the Keras callback? Since we do not closely control the TB Keras callback, I think it makes more sense to explicitly exercise the _PLUGIN_NAME_RUN_METADATA_WITH_GRAPH = "graph_run_metadata_graph" flow, too :)

Contributor Author
Yeah, that’s right. Sure, I suppose there’s no harm in doing that.
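For context, here is a minimal sketch (not part of the PR) of the graph-only tracing flow being suggested, assuming the public TF 2.x tf.summary tracing API; the logdir and function name are illustrative. With profiler=False, trace_export should emit only an op graph, i.e. the data handled under the graph_run_metadata_graph plugin name mentioned above. This is essentially the prof_g flow added in the code below.

import tensorflow as tf

@tf.function
def square(i):
    # Toy function; any traced tf.function would do here.
    return tf.constant(i) * tf.constant(i)

writer = tf.summary.create_file_writer("/tmp/graphs_demo_sketch")
with writer.as_default():
    # Graph-only flow: profiler disabled, so trace_export writes just
    # the op graph and no profile data.
    tf.summary.trace_on(graph=True, profiler=False)
    square(2)
    tf.summary.trace_export("square_graph", step=0)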


    The `profile` run has tags `prof_f` with both profile and op graph
    data (`graph_run_metadata` plugin), and `prof_g` with an op graph
    only (`graph_run_metadata_graph` plugin).
    """

    logdir = os.path.join(LOGDIR, "profile")

    @tf.function
    def f(i):
        return tf.constant(i) + tf.constant(i)

    @tf.function
    def g(i):
        return tf.constant(i) * tf.constant(i)

    with tf.summary.create_file_writer(logdir).as_default():
        for step in range(3):
            tf.summary.trace_on(profiler=True)
            print(f(step).numpy())
Contributor
I think for a more correct-looking graph, you need to do tf.constant(step) here and not pass a Python number as input to the tf.function.

Contributor Author
That doesn’t work:

diff --git a/tensorboard/plugins/graph/graphs_demo.py b/tensorboard/plugins/graph/graphs_demo.py
index 2329b2581..38dd41cb4 100644
--- a/tensorboard/plugins/graph/graphs_demo.py
+++ b/tensorboard/plugins/graph/graphs_demo.py
@@ -123,3 +123,3 @@ def profile():
             tf.summary.trace_on(profiler=True)
-            print(f(step).numpy())
+            print(f(tf.constant(step)).numpy())
             tf.summary.trace_export("prof_f", step=step, profiler_outdir=logdir)
TypeError: in user code:

    /HOMEDIR/.cache/bazel/_bazel_wchargin/52a95bbdd50941251730eb33b7476a66/execroot/org_tensorflow_tensorboard/bazel-out/k8-opt/bin/tensorboard/plugins/graph/graphs_demo.runfiles/org_tensorflow_tensorboard/tensorboard/plugins/graph/graphs_demo.py:115 f  *
        return tf.constant(i) + tf.constant(i)
    /VIRTUAL_ENV/lib/python3.8/site-packages/tensorflow/python/framework/constant_op.py:264 constant  **
        return _constant_impl(value, dtype, shape, name, verify_shape=False,
    /VIRTUAL_ENV/lib/python3.8/site-packages/tensorflow/python/framework/constant_op.py:281 _constant_impl
        tensor_util.make_tensor_proto(
    /VIRTUAL_ENV/lib/python3.8/site-packages/tensorflow/python/framework/tensor_util.py:457 make_tensor_proto
        _AssertCompatible(values, dtype)
    /VIRTUAL_ENV/lib/python3.8/site-packages/tensorflow/python/framework/tensor_util.py:334 _AssertCompatible
        raise TypeError("Expected any non-tensor type, got a tensor instead.")

As written, the graph looks okay to me:

[Screenshot of a graph with Const and Const_1 inputs to add]

And I don’t think that it matters too much what exactly the graph looks
like; I’m mostly trying to check that the data gets plumbed through
properly. It would be cool to have a set of demos as test cases for
weird graph rendering issues, but this isn’t meant to fill that need.
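A hypothetical variant, just to illustrate the point under discussion (not part of the PR): if the tf.function operated on its argument directly instead of calling tf.constant on it inside the body, it could accept tf.constant(step) as input. The name f_tensor and the input_signature are illustrative assumptions.

import tensorflow as tf

@tf.function(input_signature=[tf.TensorSpec(shape=(), dtype=tf.int32)])
def f_tensor(i):
    # The traced graph gets a placeholder input feeding Add, rather
    # than two embedded Const nodes.
    return i + i

for step in range(3):
    print(f_tensor(tf.constant(step)).numpy())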

            tf.summary.trace_export("prof_f", step=step, profiler_outdir=logdir)

            tf.summary.trace_on(profiler=False)
            print(g(step).numpy())
            tf.summary.trace_export("prof_g", step=step)


if __name__ == "__main__":
    main()
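Once the demo has been run (for example via the graphs_demo py_binary added in the BUILD file above), the tagged, keras, and profile runs land under /tmp/graphs_demo and can be inspected by pointing TensorBoard's --logdir at that directory. A quick, hypothetical sanity check that event files were actually written (not part of the PR):

import os

LOGDIR = "/tmp/graphs_demo"

# Walk the demo log directory and report which subdirectories contain
# tfevents files; the Keras callback nests its data under
# subdirectories such as keras/train.
for root, _, files in os.walk(LOGDIR):
    n_events = sum("tfevents" in name for name in files)
    if n_events:
        print("%s: %d event file(s)" % (root, n_events))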