diff --git a/tensorboard/backend/application.py b/tensorboard/backend/application.py index 6ccde8d3f8..c300eea9dd 100644 --- a/tensorboard/backend/application.py +++ b/tensorboard/backend/application.py @@ -431,10 +431,10 @@ def _serve_plugin_entry(self, request): html = textwrap.dedent( """ - - - - """ + + + + """ ).format(name=name, script_content=script_content) return http_util.Respond( request, html, "text/html", csp_scripts_sha256s=[script_sha], diff --git a/tensorboard/backend/event_processing/db_import_multiplexer_test.py b/tensorboard/backend/event_processing/db_import_multiplexer_test.py index 89ee99f2d3..e287c7a33e 100644 --- a/tensorboard/backend/event_processing/db_import_multiplexer_test.py +++ b/tensorboard/backend/event_processing/db_import_multiplexer_test.py @@ -56,11 +56,11 @@ def _get_runs(self): db = self.db_connection_provider() cursor = db.execute( """ - SELECT - Runs.run_name - FROM Runs - ORDER BY Runs.run_name - """ + SELECT + Runs.run_name + FROM Runs + ORDER BY Runs.run_name + """ ) return [row[0] for row in cursor] @@ -68,11 +68,11 @@ def _get_experiments(self): db = self.db_connection_provider() cursor = db.execute( """ - SELECT - Experiments.experiment_name - FROM Experiments - ORDER BY Experiments.experiment_name - """ + SELECT + Experiments.experiment_name + FROM Experiments + ORDER BY Experiments.experiment_name + """ ) return [row[0] for row in cursor] diff --git a/tensorboard/backend/event_processing/reservoir.py b/tensorboard/backend/event_processing/reservoir.py index 74648e2021..ca5300cb72 100644 --- a/tensorboard/backend/event_processing/reservoir.py +++ b/tensorboard/backend/event_processing/reservoir.py @@ -27,36 +27,36 @@ class Reservoir(object): """A map-to-arrays container, with deterministic Reservoir Sampling. - Items are added with an associated key. Items may be retrieved by key, and - a list of keys can also be retrieved. If size is not zero, then it dictates - the maximum number of items that will be stored with each key. Once there are - more items for a given key, they are replaced via reservoir sampling, such - that each item has an equal probability of being included in the sample. - - Deterministic means that for any given seed and bucket size, the sequence of - values that are kept for any given tag will always be the same, and that this - is independent of any insertions on other tags. That is: - - >>> separate_reservoir = reservoir.Reservoir(10) - >>> interleaved_reservoir = reservoir.Reservoir(10) - >>> for i in xrange(100): - >>> separate_reservoir.AddItem('key1', i) - >>> for i in xrange(100): - >>> separate_reservoir.AddItem('key2', i) - >>> for i in xrange(100): - >>> interleaved_reservoir.AddItem('key1', i) - >>> interleaved_reservoir.AddItem('key2', i) - - separate_reservoir and interleaved_reservoir will be in identical states. - - See: https://en.wikipedia.org/wiki/Reservoir_sampling - - Adding items has amortized O(1) runtime. - - Fields: - always_keep_last: Whether the latest seen sample is always at the - end of the reservoir. Defaults to True. - size: An integer of the maximum number of samples. + Items are added with an associated key. Items may be retrieved by key, and + a list of keys can also be retrieved. If size is not zero, then it dictates + the maximum number of items that will be stored with each key. Once there are + more items for a given key, they are replaced via reservoir sampling, such + that each item has an equal probability of being included in the sample. 
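(For intuition, here is a minimal sketch of the per-key, seeded sampling policy described above — a hypothetical `TinyReservoir`, not the actual `Reservoir` implementation in this module:

    import random

    class TinyReservoir(object):
        # Hypothetical simplification of Reservoir, for intuition only.
        def __init__(self, size, seed=0):
            self._size = size
            self._seed = seed
            self._buckets = {}  # key -> {"items": [...], "seen": int, "rng": Random}

        def AddItem(self, key, item):
            b = self._buckets.setdefault(
                key, {"items": [], "seen": 0, "rng": random.Random(self._seed)}
            )
            if self._size == 0 or len(b["items"]) < self._size:
                b["items"].append(item)  # still filling (or size 0): keep everything
            else:
                r = b["rng"].randint(0, b["seen"])
                if r < self._size:
                    b["items"].pop(r)        # sampled in: evict a uniform survivor
                    b["items"].append(item)  # and keep the new item at the end
                else:
                    b["items"][-1] = item    # always_keep_last: newest stays visible
            b["seen"] += 1

Because every key owns its own identically seeded RNG, inserts under one key cannot perturb which items survive for another key — the determinism property this docstring claims.)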
+ + Deterministic means that for any given seed and bucket size, the sequence of + values that are kept for any given tag will always be the same, and that this + is independent of any insertions on other tags. That is: + + >>> separate_reservoir = reservoir.Reservoir(10) + >>> interleaved_reservoir = reservoir.Reservoir(10) + >>> for i in xrange(100): + >>> separate_reservoir.AddItem('key1', i) + >>> for i in xrange(100): + >>> separate_reservoir.AddItem('key2', i) + >>> for i in xrange(100): + >>> interleaved_reservoir.AddItem('key1', i) + >>> interleaved_reservoir.AddItem('key2', i) + + separate_reservoir and interleaved_reservoir will be in identical states. + + See: https://en.wikipedia.org/wiki/Reservoir_sampling + + Adding items has amortized O(1) runtime. + + Fields: + always_keep_last: Whether the latest seen sample is always at the + end of the reservoir. Defaults to True. + size: An integer of the maximum number of samples. """ def __init__(self, size, seed=0, always_keep_last=True): diff --git a/tensorboard/backend/event_processing/sqlite_writer.py b/tensorboard/backend/event_processing/sqlite_writer.py index 7ac2f05c2f..eb80aff15d 100644 --- a/tensorboard/backend/event_processing/sqlite_writer.py +++ b/tensorboard/backend/event_processing/sqlite_writer.py @@ -74,9 +74,9 @@ def _maybe_init_user(self): user_id = self._create_id() cursor.execute( """ - INSERT INTO USERS (user_id, user_name, inserted_time) - VALUES (?, ?, ?) - """, + INSERT INTO USERS (user_id, user_name, inserted_time) + VALUES (?, ?, ?) + """, (user_id, user_name, time.time()), ) return user_id @@ -91,9 +91,9 @@ def _maybe_init_experiment(self, experiment_name): cursor = self._db.cursor() cursor.execute( """ - SELECT experiment_id FROM Experiments - WHERE user_id = ? AND experiment_name = ? - """, + SELECT experiment_id FROM Experiments + WHERE user_id = ? AND experiment_name = ? + """, (user_id, experiment_name), ) row = cursor.fetchone() @@ -104,11 +104,11 @@ def _maybe_init_experiment(self, experiment_name): computed_time = 0 cursor.execute( """ - INSERT INTO Experiments ( - user_id, experiment_id, experiment_name, - inserted_time, started_time, is_watching - ) VALUES (?, ?, ?, ?, ?, ?) - """, + INSERT INTO Experiments ( + user_id, experiment_id, experiment_name, + inserted_time, started_time, is_watching + ) VALUES (?, ?, ?, ?, ?, ?) + """, ( user_id, experiment_id, @@ -131,9 +131,9 @@ def _maybe_init_run(self, experiment_name, run_name): cursor = self._db.cursor() cursor.execute( """ - SELECT run_id FROM Runs - WHERE experiment_id = ? AND run_name = ? - """, + SELECT run_id FROM Runs + WHERE experiment_id = ? AND run_name = ? + """, (experiment_id, run_name), ) row = cursor.fetchone() @@ -144,10 +144,10 @@ def _maybe_init_run(self, experiment_name, run_name): started_time = 0 cursor.execute( """ - INSERT INTO Runs ( - experiment_id, run_id, run_name, inserted_time, started_time - ) VALUES (?, ?, ?, ?, ?) - """, + INSERT INTO Runs ( + experiment_id, run_id, run_name, inserted_time, started_time + ) VALUES (?, ?, ?, ?, ?) + """, (experiment_id, run_id, run_name, time.time(), started_time), ) return run_id @@ -188,11 +188,11 @@ def _maybe_init_tags(self, run_id, tag_to_metadata): ) cursor.executemany( """ - INSERT INTO Tags ( - run_id, tag_id, tag_name, inserted_time, display_name, plugin_name, - plugin_data - ) VALUES (?, ?, ?, ?, ?, ?, ?) - """, + INSERT INTO Tags ( + run_id, tag_id, tag_name, inserted_time, display_name, plugin_name, + plugin_data + ) VALUES (?, ?, ?, ?, ?, ?, ?) 
+ """, new_tag_data, ) return tag_to_id @@ -237,10 +237,10 @@ def write_summaries(self, tagged_data, experiment_name, run_name): ) self._db.executemany( """ - INSERT OR REPLACE INTO Tensors ( - series, step, computed_time, dtype, shape, data - ) VALUES (?, ?, ?, ?, ?, ?) - """, + INSERT OR REPLACE INTO Tensors ( + series, step, computed_time, dtype, shape, data + ) VALUES (?, ?, ?, ?, ?, ?) + """, tensor_values, ) diff --git a/tensorboard/examples/plugins/example_raw_scalars/tensorboard_plugin_example_raw_scalars/plugin.py b/tensorboard/examples/plugins/example_raw_scalars/tensorboard_plugin_example_raw_scalars/plugin.py index 5727c7946b..c875c3ab3b 100644 --- a/tensorboard/examples/plugins/example_raw_scalars/tensorboard_plugin_example_raw_scalars/plugin.py +++ b/tensorboard/examples/plugins/example_raw_scalars/tensorboard_plugin_example_raw_scalars/plugin.py @@ -48,9 +48,9 @@ class ExampleRawScalarsPlugin(base_plugin.TBPlugin): def __init__(self, context): """Instantiates ExampleRawScalarsPlugin. - Args: - context: A base_plugin.TBContext instance. - """ + Args: + context: A base_plugin.TBContext instance. + """ self._multiplexer = context.multiplexer def get_plugin_apps(self): @@ -64,10 +64,10 @@ def get_plugin_apps(self): def _serve_tags(self, request): """Serves run to tag info. - Frontend clients can use the Multiplexer's run+tag structure to request data - for a specific run+tag. Responds with a map of the form: - {runName: [tagName, tagName, ...]} - """ + Frontend clients can use the Multiplexer's run+tag structure to request data + for a specific run+tag. Responds with a map of the form: + {runName: [tagName, tagName, ...]} + """ run_tag_mapping = self._multiplexer.PluginRunToTagToContent( _SCALAR_PLUGIN_NAME ) @@ -81,10 +81,10 @@ def _serve_tags(self, request): def _serve_static_file(self, request): """Returns a resource file from the static asset directory. - Requests from the frontend have a path in this form: - /data/plugin/example_raw_scalars/static/foo - This serves the appropriate asset: ./static/foo. - """ + Requests from the frontend have a path in this form: + /data/plugin/example_raw_scalars/static/foo + This serves the appropriate asset: ./static/foo. + """ static_path_part = request.path[len(_PLUGIN_DIRECTORY_PATH_PART) :] res_path = os.path.join(os.path.dirname(__file__), static_path_part) @@ -97,9 +97,9 @@ def _serve_static_file(self, request): def is_active(self): """Returns whether there is relevant data for the plugin to process. - When there are no runs with scalar data, TensorBoard will hide the plugin - from the main navigation bar. - """ + When there are no runs with scalar data, TensorBoard will hide the plugin + from the main navigation bar. + """ return bool( self._multiplexer.PluginRunToTagToContent(_SCALAR_PLUGIN_NAME) ) @@ -110,20 +110,20 @@ def frontend_metadata(self): def scalars_impl(self, tag, run): """Returns scalar data for the specified tag and run. - For details on how to use tags and runs, see - https://github.com/tensorflow/tensorboard#tags-giving-names-to-data + For details on how to use tags and runs, see + https://github.com/tensorflow/tensorboard#tags-giving-names-to-data - Args: - tag: string - run: string + Args: + tag: string + run: string - Returns: - A list of ScalarEvents - tuples containing 3 numbers describing entries in - the data series. + Returns: + A list of ScalarEvents - tuples containing 3 numbers describing entries in + the data series. - Raises: - errors.NotFoundError: if run+tag pair has no scalar data. 
- """ + Raises: + errors.NotFoundError: if run+tag pair has no scalar data. + """ try: tensor_events = self._multiplexer.Tensors(run, tag) values = [ diff --git a/tensorboard/manager_e2e_test.py b/tensorboard/manager_e2e_test.py index 9b8843ca4d..7f2300ee39 100644 --- a/tensorboard/manager_e2e_test.py +++ b/tensorboard/manager_e2e_test.py @@ -222,12 +222,12 @@ def test_exit_failure(self): name="fail-with-77", program=textwrap.dedent( r""" - #!/bin/sh - printf >&2 'fatal: something bad happened\n' - printf 'also some stdout\n' - exit 77 - """.lstrip(), - ), + #!/bin/sh + printf >&2 'fatal: something bad happened\n' + printf 'also some stdout\n' + exit 77 + """ + ).lstrip(), ) start_result = manager.start(["--logdir=./logs", "--port=0"]) self.assertIsInstance(start_result, manager.StartFailed) @@ -251,12 +251,12 @@ def test_exit_success(self): name="fail-with-0", program=textwrap.dedent( r""" - #!/bin/sh - printf >&2 'info: something good happened\n' - printf 'also some standard output\n' - exit 0 - """.lstrip(), - ), + #!/bin/sh + printf >&2 'info: something good happened\n' + printf 'also some standard output\n' + exit 0 + """ + ).lstrip(), ) start_result = manager.start(["--logdir=./logs", "--port=0"]) self.assertIsInstance(start_result, manager.StartFailed) @@ -278,12 +278,12 @@ def test_failure_unreadable_stdio(self): name="fail-and-nuke-tmp", program=textwrap.dedent( r""" - #!/bin/sh - rm -r %s - exit 22 - """.lstrip() + #!/bin/sh + rm -r %s + exit 22 + """ % pipes.quote(self.tmproot), - ), + ).lstrip(), ) start_result = manager.start(["--logdir=./logs", "--port=0"]) self.assertIsInstance(start_result, manager.StartFailed) @@ -303,13 +303,13 @@ def test_timeout(self): name="wait-a-minute", program=textwrap.dedent( r""" - #!/bin/sh - printf >%s '%%s' "$$" - printf >&2 'warn: I am tired\n' - sleep 60 - """.lstrip() + #!/bin/sh + printf >%s '%%s' "$$" + printf >&2 'warn: I am tired\n' + sleep 60 + """ % pipes.quote(os.path.realpath(pid_file)), - ), + ).lstrip(), ) start_result = manager.start( ["--logdir=./logs", "--port=0"], @@ -329,12 +329,12 @@ def test_tensorboard_binary_environment_variable(self): filepath = os.path.join(tempdir, "tensorbad") program = textwrap.dedent( r""" - #!/bin/sh - printf >&2 'tensorbad: fatal: something bad happened\n' - printf 'tensorbad: also some stdout\n' - exit 77 - """.lstrip() - ) + #!/bin/sh + printf >&2 'tensorbad: fatal: something bad happened\n' + printf 'tensorbad: also some stdout\n' + exit 77 + """ + ).lstrip() with open(filepath, "w") as outfile: outfile.write(program) os.chmod(filepath, 0o777) diff --git a/tensorboard/notebook.py b/tensorboard/notebook.py index 406aad3a76..fe0e13aa40 100644 --- a/tensorboard/notebook.py +++ b/tensorboard/notebook.py @@ -92,9 +92,9 @@ def _get_context(): def load_ipython_extension(ipython): """Deprecated: use `%load_ext tensorboard` instead. - Raises: - RuntimeError: Always. - """ + Raises: + RuntimeError: Always. + """ raise RuntimeError( "Use '%load_ext tensorboard' instead of '%load_ext tensorboard.notebook'." 
) @@ -344,16 +344,16 @@ def _display_colab(port, height, display_handle): import IPython.display shell = """ - (async () => { - const url = await google.colab.kernel.proxyPort(%PORT%, {"cache": true}); - const iframe = document.createElement('iframe'); - iframe.src = url; - iframe.setAttribute('width', '100%'); - iframe.setAttribute('height', '%HEIGHT%'); - iframe.setAttribute('frameborder', 0); - document.body.appendChild(iframe); - })(); - """ + (async () => { + const url = await google.colab.kernel.proxyPort(%PORT%, {"cache": true}); + const iframe = document.createElement('iframe'); + iframe.src = url; + iframe.setAttribute('width', '100%'); + iframe.setAttribute('height', '%HEIGHT%'); + iframe.setAttribute('frameborder', 0); + document.body.appendChild(iframe); + })(); + """ replacements = [ ("%PORT%", "%d" % port), ("%HEIGHT%", "%d" % height), diff --git a/tensorboard/plugin_util_test.py b/tensorboard/plugin_util_test.py index 23d6948bdd..777613a5df 100644 --- a/tensorboard/plugin_util_test.py +++ b/tensorboard/plugin_util_test.py @@ -45,42 +45,44 @@ def test_table_formatting(self): self._test( textwrap.dedent( u"""\ - Here is some data: + Here is some data: - TensorBoard usage | Happiness - ------------------|---------- - 0.0 | 0.0 - 0.5 | 0.5 - 1.0 | 1.0 + TensorBoard usage | Happiness + ------------------|---------- + 0.0 | 0.0 + 0.5 | 0.5 + 1.0 | 1.0 - Wouldn't you agree?""" + Wouldn't you agree? + """ ), textwrap.dedent( u"""\ -

-                <p>Here is some data:</p>
-                <table>
-                <thead>
-                <tr>
-                <th>TensorBoard usage</th>
-                <th>Happiness</th>
-                </tr>
-                </thead>
-                <tbody>
-                <tr>
-                <td>0.0</td>
-                <td>0.0</td>
-                </tr>
-                <tr>
-                <td>0.5</td>
-                <td>0.5</td>
-                </tr>
-                <tr>
-                <td>1.0</td>
-                <td>1.0</td>
-                </tr>
-                </tbody>
-                </table>
-                <p>Wouldn't you agree?</p>"""
+            <p>Here is some data:</p>
+            <table>
+            <thead>
+            <tr>
+            <th>TensorBoard usage</th>
+            <th>Happiness</th>
+            </tr>
+            </thead>
+            <tbody>
+            <tr>
+            <td>0.0</td>
+            <td>0.0</td>
+            </tr>
+            <tr>
+            <td>0.5</td>
+            <td>0.5</td>
+            </tr>
+            <tr>
+            <td>1.0</td>
+            <td>1.0</td>
+            </tr>
+            </tbody>
+            </table>
+            <p>Wouldn't you agree?</p>
+ """.rstrip() ), ) diff --git a/tensorboard/plugins/beholder/im_util.py b/tensorboard/plugins/beholder/im_util.py index db3e6e9ef6..06c1039a5b 100644 --- a/tensorboard/plugins/beholder/im_util.py +++ b/tensorboard/plugins/beholder/im_util.py @@ -35,9 +35,9 @@ def global_extrema(arrays): def scale_sections(sections, scaling_scope): """ - input: unscaled sections. - returns: sections scaled to [0, 255] - """ + input: unscaled sections. + returns: sections scaled to [0, 255] + """ new_sections = [] if scaling_scope == "layer": diff --git a/tensorboard/plugins/beholder/visualizer.py b/tensorboard/plugins/beholder/visualizer.py index 46fbca7a19..ff64043445 100644 --- a/tensorboard/plugins/beholder/visualizer.py +++ b/tensorboard/plugins/beholder/visualizer.py @@ -183,10 +183,10 @@ def _determine_section_height(self, array, show_all): def _arrays_to_sections(self, arrays): """ - input: unprocessed numpy arrays. - returns: columns of the size that they will appear in the image, not scaled - for display. That needs to wait until after variance is computed. - """ + input: unprocessed numpy arrays. + returns: columns of the size that they will appear in the image, not scaled + for display. That needs to wait until after variance is computed. + """ sections = [] sections_to_resize_later = {} show_all = self.config["show_all"] diff --git a/tensorboard/plugins/core/core_plugin.py b/tensorboard/plugins/core/core_plugin.py index 4dfe678f1e..78c5fb7836 100644 --- a/tensorboard/plugins/core/core_plugin.py +++ b/tensorboard/plugins/core/core_plugin.py @@ -191,13 +191,13 @@ def _serve_runs(self, request): db = self._db_connection_provider() cursor = db.execute( """ - SELECT - run_name, - started_time IS NULL as started_time_nulls_last, - started_time - FROM Runs - ORDER BY started_time_nulls_last, started_time, run_name - """ + SELECT + run_name, + started_time IS NULL as started_time_nulls_last, + started_time + FROM Runs + ORDER BY started_time_nulls_last, started_time, run_name + """ ) run_names = [row[0] for row in cursor] else: @@ -237,15 +237,15 @@ def list_experiments_impl(self): db = self._db_connection_provider() cursor = db.execute( """ - SELECT - experiment_id, - experiment_name, - started_time, - started_time IS NULL as started_time_nulls_last - FROM Experiments - ORDER BY started_time_nulls_last, started_time, experiment_name, - experiment_id - """ + SELECT + experiment_id, + experiment_name, + started_time, + started_time IS NULL as started_time_nulls_last + FROM Experiments + ORDER BY started_time_nulls_last, started_time, experiment_name, + experiment_id + """ ) results = [ {"id": row[0], "name": row[1], "startTime": row[2],} @@ -272,28 +272,28 @@ def _serve_experiment_runs(self, request): db = self._db_connection_provider() cursor = db.execute( """ - SELECT - Runs.run_id, - Runs.run_name, - Runs.started_time, - Runs.started_time IS NULL as started_time_nulls_last, - Tags.tag_id, - Tags.tag_name, - Tags.display_name, - Tags.plugin_name, - Tags.inserted_time - From Runs - LEFT JOIN Tags ON Runs.run_id = Tags.run_id - WHERE Runs.experiment_id = ? 
- AND (Tags.tag_id IS NULL OR Tags.plugin_name IS NOT NULL) - ORDER BY started_time_nulls_last, - Runs.started_time, - Runs.run_name, - Runs.run_id, - Tags.tag_name, - Tags.display_name, - Tags.inserted_time; - """, + SELECT + Runs.run_id, + Runs.run_name, + Runs.started_time, + Runs.started_time IS NULL as started_time_nulls_last, + Tags.tag_id, + Tags.tag_name, + Tags.display_name, + Tags.plugin_name, + Tags.inserted_time + From Runs + LEFT JOIN Tags ON Runs.run_id = Tags.run_id + WHERE Runs.experiment_id = ? + AND (Tags.tag_id IS NULL OR Tags.plugin_name IS NOT NULL) + ORDER BY started_time_nulls_last, + Runs.started_time, + Runs.run_name, + Runs.run_id, + Tags.tag_name, + Tags.display_name, + Tags.inserted_time; + """, (exp_id,), ) for row in cursor: diff --git a/tensorboard/plugins/custom_scalar/custom_scalars_plugin.py b/tensorboard/plugins/custom_scalar/custom_scalars_plugin.py index 9212482583..e1c6808d87 100644 --- a/tensorboard/plugins/custom_scalar/custom_scalars_plugin.py +++ b/tensorboard/plugins/custom_scalar/custom_scalars_plugin.py @@ -264,18 +264,18 @@ def scalars_impl(self, run, tag_regex_string): @wrappers.Request.application def layout_route(self, request): - r"""Fetches the custom layout specified by the config file in the logdir. + """Fetches the custom layout specified by the config file in the logdir. - If more than 1 run contains a layout, this method merges the layouts by - merging charts within individual categories. If 2 categories with the same - name are found, the charts within are merged. The merging is based on the - order of the runs to which the layouts are written. + If more than 1 run contains a layout, this method merges the layouts by + merging charts within individual categories. If 2 categories with the same + name are found, the charts within are merged. The merging is based on the + order of the runs to which the layouts are written. - The response is a JSON object mirroring properties of the Layout proto if a - layout for any run is found. + The response is a JSON object mirroring properties of the Layout proto if a + layout for any run is found. - The response is an empty object if no layout could be found. - """ + The response is an empty object if no layout could be found. 
+ """ body = self.layout_impl() return http_util.Respond(request, body, "application/json") diff --git a/tensorboard/plugins/graph/graph_util_test.py b/tensorboard/plugins/graph/graph_util_test.py index c456727b63..552222a133 100644 --- a/tensorboard/plugins/graph/graph_util_test.py +++ b/tensorboard/plugins/graph/graph_util_test.py @@ -23,84 +23,84 @@ class GraphUtilTest(tf.test.TestCase): def test_combine_graph_defs(self): expected_proto = """ - node { - name: "X" - op: "Input" - } - node { - name: "W" - op: "Input" - } - node { - name: "Y" - op: "MatMul" - input: "X" - input: "W" - } - node { - name: "A" - op: "Input" - } - node { - name: "B" - op: "Input" - } - node { - name: "C" - op: "MatMul" - input: "A" - input: "B" - } - versions { - producer: 21 - } - """ + node { + name: "X" + op: "Input" + } + node { + name: "W" + op: "Input" + } + node { + name: "Y" + op: "MatMul" + input: "X" + input: "W" + } + node { + name: "A" + op: "Input" + } + node { + name: "B" + op: "Input" + } + node { + name: "C" + op: "MatMul" + input: "A" + input: "B" + } + versions { + producer: 21 + } + """ graph_def_a = GraphDef() text_format.Merge( """ - node { - name: "X" - op: "Input" - } - node { - name: "W" - op: "Input" - } - node { - name: "Y" - op: "MatMul" - input: "X" - input: "W" - } - versions { - producer: 21 - } - """, + node { + name: "X" + op: "Input" + } + node { + name: "W" + op: "Input" + } + node { + name: "Y" + op: "MatMul" + input: "X" + input: "W" + } + versions { + producer: 21 + } + """, graph_def_a, ) graph_def_b = GraphDef() text_format.Merge( """ - node { - name: "A" - op: "Input" - } - node { - name: "B" - op: "Input" - } - node { - name: "C" - op: "MatMul" - input: "A" - input: "B" - } - versions { - producer: 21 - } - """, + node { + name: "A" + op: "Input" + } + node { + name: "B" + op: "Input" + } + node { + name: "C" + op: "MatMul" + input: "A" + input: "B" + } + versions { + producer: 21 + } + """, graph_def_b, ) @@ -111,68 +111,68 @@ def test_combine_graph_defs(self): def test_combine_graph_defs_name_collided_but_same_content(self): expected_proto = """ - node { - name: "X" - op: "Input" - } - node { - name: "W" - op: "Input" - } - node { - name: "Y" - op: "MatMul" - input: "X" - input: "W" - } - node { - name: "A" - op: "Input" - } - versions { - producer: 21 - } - """ + node { + name: "X" + op: "Input" + } + node { + name: "W" + op: "Input" + } + node { + name: "Y" + op: "MatMul" + input: "X" + input: "W" + } + node { + name: "A" + op: "Input" + } + versions { + producer: 21 + } + """ graph_def_a = GraphDef() text_format.Merge( """ - node { - name: "X" - op: "Input" - } - node { - name: "W" - op: "Input" - } - node { - name: "Y" - op: "MatMul" - input: "X" - input: "W" - } - versions { - producer: 21 - } - """, + node { + name: "X" + op: "Input" + } + node { + name: "W" + op: "Input" + } + node { + name: "Y" + op: "MatMul" + input: "X" + input: "W" + } + versions { + producer: 21 + } + """, graph_def_a, ) graph_def_b = GraphDef() text_format.Merge( """ - node { - name: "X" - op: "Input" - } - node { - name: "A" - op: "Input" - } - versions { - producer: 21 - } - """, + node { + name: "X" + op: "Input" + } + node { + name: "A" + op: "Input" + } + versions { + producer: 21 + } + """, graph_def_b, ) @@ -185,49 +185,49 @@ def test_combine_graph_defs_name_collided_different_content(self): graph_def_a = GraphDef() text_format.Merge( """ - node { - name: "X" - op: "Input" - } - node { - name: "W" - op: "Input" - } - node { - name: "Y" - op: "MatMul" - input: "X" - input: "W" - } - 
versions { - producer: 21 - } - """, + node { + name: "X" + op: "Input" + } + node { + name: "W" + op: "Input" + } + node { + name: "Y" + op: "MatMul" + input: "X" + input: "W" + } + versions { + producer: 21 + } + """, graph_def_a, ) graph_def_b = GraphDef() text_format.Merge( """ - node { - name: "X" - op: "Input" - device: "cpu:0" - } - node { - name: "Z" - op: "Input" - } - node { - name: "Q" - op: "MatMul" - input: "X" - input: "Z" - } - versions { - producer: 21 - } - """, + node { + name: "X" + op: "Input" + device: "cpu:0" + } + node { + name: "Z" + op: "Input" + } + node { + name: "Q" + op: "MatMul" + input: "X" + input: "Z" + } + versions { + producer: 21 + } + """, graph_def_b, ) @@ -245,36 +245,36 @@ def test_combine_graph_defs_dst_nodes_duplicate_keys(self): graph_def_a = GraphDef() text_format.Merge( """ - node { - name: "X" - op: "Input" - } - node { - name: "X" - op: "Input" - } - versions { - producer: 21 - } - """, + node { + name: "X" + op: "Input" + } + node { + name: "X" + op: "Input" + } + versions { + producer: 21 + } + """, graph_def_a, ) graph_def_b = GraphDef() text_format.Merge( """ - node { - name: "X" - op: "Input" - } - node { - name: "Z" - op: "Input" - } - versions { - producer: 21 - } - """, + node { + name: "X" + op: "Input" + } + node { + name: "Z" + op: "Input" + } + versions { + producer: 21 + } + """, graph_def_b, ) @@ -287,37 +287,37 @@ def test_combine_graph_defs_src_nodes_duplicate_keys(self): graph_def_a = GraphDef() text_format.Merge( """ - node { - name: "X" - op: "Input" - } - node { - name: "Y" - op: "Input" - } - versions { - producer: 21 - } - """, + node { + name: "X" + op: "Input" + } + node { + name: "Y" + op: "Input" + } + versions { + producer: 21 + } + """, graph_def_a, ) graph_def_b = GraphDef() text_format.Merge( """ - node { - name: "W" - op: "Input" - device: "cpu:0" - } - node { - name: "W" - op: "Input" - } - versions { - producer: 21 - } - """, + node { + name: "W" + op: "Input" + device: "cpu:0" + } + node { + name: "W" + op: "Input" + } + versions { + producer: 21 + } + """, graph_def_b, ) @@ -328,120 +328,120 @@ def test_combine_graph_defs_src_nodes_duplicate_keys(self): def test_combine_graph_defs_function(self): expected_proto = """ - library { - function { - signature { - name: "foo" - input_arg { - name: "x" - type: DT_HALF - } - output_arg { - name: "identity" - type: DT_HALF - } - } - node_def { - name: "add" - op: "Add" - input: "x" - input: "y" - } - } - function { - signature { - name: "foo_1" - input_arg { - name: "x" - type: DT_HALF - } - output_arg { - name: "identity" - type: DT_HALF + library { + function { + signature { + name: "foo" + input_arg { + name: "x" + type: DT_HALF + } + output_arg { + name: "identity" + type: DT_HALF + } + } + node_def { + name: "add" + op: "Add" + input: "x" + input: "y" + } + } + function { + signature { + name: "foo_1" + input_arg { + name: "x" + type: DT_HALF + } + output_arg { + name: "identity" + type: DT_HALF + } + } + node_def { + name: "add" + op: "Add" + input: "x" + input: "y" + } + } } - } - node_def { - name: "add" - op: "Add" - input: "x" - input: "y" - } - } - } - """ + """ graph_def_a = GraphDef() text_format.Merge( """ - library { - function { - signature { - name: "foo" - input_arg { - name: "x" - type: DT_HALF - } - output_arg { - name: "identity" - type: DT_HALF - } - } - node_def { - name: "add" - op: "Add" - input: "x" - input: "y" - } - } - } - """, + library { + function { + signature { + name: "foo" + input_arg { + name: "x" + type: DT_HALF + } + output_arg { 
+ name: "identity" + type: DT_HALF + } + } + node_def { + name: "add" + op: "Add" + input: "x" + input: "y" + } + } + } + """, graph_def_a, ) graph_def_b = GraphDef() text_format.Merge( """ - library { - function { - signature { - name: "foo" - input_arg { - name: "x" - type: DT_HALF - } - output_arg { - name: "identity" - type: DT_HALF - } - } - node_def { - name: "add" - op: "Add" - input: "x" - input: "y" - } - } - function { - signature { - name: "foo_1" - input_arg { - name: "x" - type: DT_HALF - } - output_arg { - name: "identity" - type: DT_HALF - } - } - node_def { - name: "add" - op: "Add" - input: "x" - input: "y" - } - } - } - """, + library { + function { + signature { + name: "foo" + input_arg { + name: "x" + type: DT_HALF + } + output_arg { + name: "identity" + type: DT_HALF + } + } + node_def { + name: "add" + op: "Add" + input: "x" + input: "y" + } + } + function { + signature { + name: "foo_1" + input_arg { + name: "x" + type: DT_HALF + } + output_arg { + name: "identity" + type: DT_HALF + } + } + node_def { + name: "add" + op: "Add" + input: "x" + input: "y" + } + } + } + """, graph_def_b, ) @@ -454,75 +454,75 @@ def test_combine_graph_defs_function_collison(self): graph_def_a = GraphDef() text_format.Merge( """ - library { - function { - signature { - name: "foo" - input_arg { - name: "x" - type: DT_HALF - } - output_arg { - name: "identity" - type: DT_HALF - } - } - node_def { - name: "add" - op: "Add" - input: "x" - input: "y" - } - } - } - """, + library { + function { + signature { + name: "foo" + input_arg { + name: "x" + type: DT_HALF + } + output_arg { + name: "identity" + type: DT_HALF + } + } + node_def { + name: "add" + op: "Add" + input: "x" + input: "y" + } + } + } + """, graph_def_a, ) graph_def_b = GraphDef() text_format.Merge( """ - library { - function { - signature { - name: "foo" - input_arg { - name: "x" - type: DT_HALF - } - output_arg { - name: "identity" - type: DT_HALF - } - } - node_def { - name: "div" - op: "Div" - input: "x" - input: "y" - } - } - function { - signature { - name: "foo_1" - input_arg { - name: "x" - type: DT_HALF - } - output_arg { - name: "identity" - type: DT_HALF - } - } - node_def { - name: "add" - op: "Add" - input: "x" - input: "y" - } - } - } - """, + library { + function { + signature { + name: "foo" + input_arg { + name: "x" + type: DT_HALF + } + output_arg { + name: "identity" + type: DT_HALF + } + } + node_def { + name: "div" + op: "Div" + input: "x" + input: "y" + } + } + function { + signature { + name: "foo_1" + input_arg { + name: "x" + type: DT_HALF + } + output_arg { + name: "identity" + type: DT_HALF + } + } + node_def { + name: "add" + op: "Add" + input: "x" + input: "y" + } + } + } + """, graph_def_b, ) @@ -540,69 +540,69 @@ def test_combine_graph_defs_dst_function_duplicate_keys(self): graph_def_a = GraphDef() text_format.Merge( """ - library { - function { - signature { - name: "foo" - input_arg { - name: "x" - type: DT_HALF - } - output_arg { - name: "identity" - type: DT_HALF - } - } - node_def { - name: "add" - op: "Add" - input: "x" - input: "y" - } - } - function { - signature { - name: "foo" - input_arg { - name: "y" - type: DT_HALF - } - output_arg { - name: "identity" - type: DT_HALF - } - } - } - } - """, + library { + function { + signature { + name: "foo" + input_arg { + name: "x" + type: DT_HALF + } + output_arg { + name: "identity" + type: DT_HALF + } + } + node_def { + name: "add" + op: "Add" + input: "x" + input: "y" + } + } + function { + signature { + name: "foo" + input_arg { + name: "y" + 
type: DT_HALF + } + output_arg { + name: "identity" + type: DT_HALF + } + } + } + } + """, graph_def_a, ) graph_def_b = GraphDef() text_format.Merge( """ - library { - function { - signature { - name: "bar" - input_arg { - name: "x" - type: DT_HALF - } - output_arg { - name: "identity" - type: DT_HALF - } - } - node_def { - name: "div" - op: "Div" - input: "x" - input: "y" - } - } - } - """, + library { + function { + signature { + name: "bar" + input_arg { + name: "x" + type: DT_HALF + } + output_arg { + name: "identity" + type: DT_HALF + } + } + node_def { + name: "div" + op: "Div" + input: "x" + input: "y" + } + } + } + """, graph_def_b, ) @@ -617,63 +617,63 @@ def test_combine_graph_defs_src_function_duplicate_keys(self): graph_def_a = GraphDef() text_format.Merge( """ - library { - function { - signature { - name: "foo" - input_arg { - name: "x" - type: DT_HALF - } - output_arg { - name: "identity" - type: DT_HALF - } - } - node_def { - name: "add" - op: "Add" - input: "x" - input: "y" - } - } - } - """, + library { + function { + signature { + name: "foo" + input_arg { + name: "x" + type: DT_HALF + } + output_arg { + name: "identity" + type: DT_HALF + } + } + node_def { + name: "add" + op: "Add" + input: "x" + input: "y" + } + } + } + """, graph_def_a, ) graph_def_b = GraphDef() text_format.Merge( """ - library { - function { - signature { - name: "bar" - input_arg { - name: "x" - type: DT_HALF - } - output_arg { - name: "identity" - type: DT_HALF - } - } - } - function { - signature { - name: "bar" - input_arg { - name: "y" - type: DT_HALF - } - output_arg { - name: "identity" - type: DT_HALF - } - } - } - } - """, + library { + function { + signature { + name: "bar" + input_arg { + name: "x" + type: DT_HALF + } + output_arg { + name: "identity" + type: DT_HALF + } + } + } + function { + signature { + name: "bar" + input_arg { + name: "y" + type: DT_HALF + } + output_arg { + name: "identity" + type: DT_HALF + } + } + } + } + """, graph_def_b, ) @@ -686,45 +686,45 @@ def test_combine_graph_defs_src_function_duplicate_keys(self): def test_combine_graph_defs_gradient(self): expected_proto = """ - library { - gradient { - function_name: "foo" - gradient_func: "foo_grad" - } - gradient { - function_name: "bar" - gradient_func: "bar_grad" - } - } - """ + library { + gradient { + function_name: "foo" + gradient_func: "foo_grad" + } + gradient { + function_name: "bar" + gradient_func: "bar_grad" + } + } + """ graph_def_a = GraphDef() text_format.Merge( """ - library { - gradient { - function_name: "foo" - gradient_func: "foo_grad" - } - } - """, + library { + gradient { + function_name: "foo" + gradient_func: "foo_grad" + } + } + """, graph_def_a, ) graph_def_b = GraphDef() text_format.Merge( """ - library { - gradient { - function_name: "foo" - gradient_func: "foo_grad" - } - gradient { - function_name: "bar" - gradient_func: "bar_grad" - } - } - """, + library { + gradient { + function_name: "foo" + gradient_func: "foo_grad" + } + gradient { + function_name: "bar" + gradient_func: "bar_grad" + } + } + """, graph_def_b, ) @@ -737,30 +737,30 @@ def test_combine_graph_defs_gradient_collison(self): graph_def_a = GraphDef() text_format.Merge( """ - library { - gradient { - function_name: "foo" - gradient_func: "foo_grad" - } - } - """, + library { + gradient { + function_name: "foo" + gradient_func: "foo_grad" + } + } + """, graph_def_a, ) graph_def_b = GraphDef() text_format.Merge( """ - library { - gradient { - function_name: "bar" - gradient_func: "bar_grad" - } - gradient { - 
function_name: "foo_1" - gradient_func: "foo_grad" - } - } - """, + library { + gradient { + function_name: "bar" + gradient_func: "bar_grad" + } + gradient { + function_name: "foo_1" + gradient_func: "foo_grad" + } + } + """, graph_def_b, ) @@ -778,30 +778,30 @@ def test_combine_graph_defs_dst_gradient_func_non_unique(self): graph_def_a = GraphDef() text_format.Merge( """ - library { - gradient { - function_name: "foo" - gradient_func: "foo_grad" - } - gradient { - function_name: "foo_bar" - gradient_func: "foo_grad" - } - } - """, + library { + gradient { + function_name: "foo" + gradient_func: "foo_grad" + } + gradient { + function_name: "foo_bar" + gradient_func: "foo_grad" + } + } + """, graph_def_a, ) graph_def_b = GraphDef() text_format.Merge( """ - library { - gradient { - function_name: "bar" - gradient_func: "bar_grad" - } - } - """, + library { + gradient { + function_name: "bar" + gradient_func: "bar_grad" + } + } + """, graph_def_b, ) @@ -816,30 +816,30 @@ def test_combine_graph_defs_src_gradient_func_non_unique(self): graph_def_a = GraphDef() text_format.Merge( """ - library { - gradient { - function_name: "foo" - gradient_func: "foo_grad" - } - } - """, + library { + gradient { + function_name: "foo" + gradient_func: "foo_grad" + } + } + """, graph_def_a, ) graph_def_b = GraphDef() text_format.Merge( """ - library { - gradient { - function_name: "bar" - gradient_func: "bar_grad" - } - gradient { - function_name: "bar_baz" - gradient_func: "bar_grad" - } - } - """, + library { + gradient { + function_name: "bar" + gradient_func: "bar_grad" + } + gradient { + function_name: "bar_baz" + gradient_func: "bar_grad" + } + } + """, graph_def_b, ) diff --git a/tensorboard/plugins/graph/keras_util.py b/tensorboard/plugins/graph/keras_util.py index 646569361e..30f307a2ce 100644 --- a/tensorboard/plugins/graph/keras_util.py +++ b/tensorboard/plugins/graph/keras_util.py @@ -47,14 +47,15 @@ def _walk_layers(keras_layer): """Walks the nested keras layer configuration in preorder. - Args: - keras_layer: Keras configuration from model.to_json. - - Yields: - A tuple of (name_scope, layer_config). - name_scope: a string representing a scope name, similar to that of tf.name_scope. - layer_config: a dict representing a Keras layer configuration. - """ + + Args: + keras_layer: Keras configuration from model.to_json. + + Yields: + A tuple of (name_scope, layer_config). + name_scope: a string representing a scope name, similar to that of tf.name_scope. + layer_config: a dict representing a Keras layer configuration. 
+ """ yield ("", keras_layer) if keras_layer.get("config").get("layers"): name_scope = keras_layer.get("config").get("name") diff --git a/tensorboard/plugins/graph/keras_util_test.py b/tensorboard/plugins/graph/keras_util_test.py index 669fafed81..aefae88f5f 100644 --- a/tensorboard/plugins/graph/keras_util_test.py +++ b/tensorboard/plugins/graph/keras_util_test.py @@ -37,70 +37,70 @@ def assertGraphDefToModel(self, expected_proto, model): def test_keras_model_to_graph_def_sequential_model(self): expected_proto = """ - node { - name: "sequential/dense" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Dense" - } - } - } - node { - name: "sequential/my_relu" - input: "sequential/dense" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Activation" - } - } - } - node { - name: "sequential/dense_1" - input: "sequential/my_relu" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Dense" - } - } - } - node { - name: "sequential/activation" - input: "sequential/dense_1" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Activation" - } - } - } - """ + node { + name: "sequential/dense" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Dense" + } + } + } + node { + name: "sequential/my_relu" + input: "sequential/dense" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Activation" + } + } + } + node { + name: "sequential/dense_1" + input: "sequential/my_relu" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Dense" + } + } + } + node { + name: "sequential/activation" + input: "sequential/dense_1" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Activation" + } + } + } + """ model = tf.keras.models.Sequential( [ tf.keras.layers.Dense(32, input_shape=(784,)), @@ -113,70 +113,70 @@ def test_keras_model_to_graph_def_sequential_model(self): def test_keras_model_to_graph_def_functional_model(self): expected_proto = """ - node { - name: "model/functional_input" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "InputLayer" - } - } - } - node { - name: "model/dense" - input: "model/functional_input" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Dense" - } - } - } - node { - name: "model/dense_1" - input: "model/dense" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Dense" - } - } - } - node { - name: "model/dense_2" - input: "model/dense_1" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Dense" - } - } - } - """ + node { + name: "model/functional_input" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "InputLayer" + } + } + } + node { + name: "model/dense" + input: "model/functional_input" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Dense" + } + } + } + node { + name: "model/dense_1" + input: "model/dense" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Dense" + } + } + } + node { + 
name: "model/dense_2" + input: "model/dense_1" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Dense" + } + } + } + """ inputs = tf.keras.layers.Input(shape=(784,), name="functional_input") d0 = tf.keras.layers.Dense(64, activation="relu") d1 = tf.keras.layers.Dense(64, activation="relu") @@ -187,71 +187,71 @@ def test_keras_model_to_graph_def_functional_model(self): def test_keras_model_to_graph_def_functional_model_with_cycle(self): expected_proto = """ - node { - name: "model/cycle_input" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "InputLayer" - } - } - } - node { - name: "model/dense" - input: "model/cycle_input" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Dense" - } - } - } - node { - name: "model/dense_1" - input: "model/dense" - input: "model/dense_2" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Dense" - } - } - } - node { - name: "model/dense_2" - input: "model/dense_1" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Dense" - } - } - } - """ + node { + name: "model/cycle_input" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "InputLayer" + } + } + } + node { + name: "model/dense" + input: "model/cycle_input" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Dense" + } + } + } + node { + name: "model/dense_1" + input: "model/dense" + input: "model/dense_2" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Dense" + } + } + } + node { + name: "model/dense_2" + input: "model/dense_1" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Dense" + } + } + } + """ inputs = tf.keras.layers.Input(shape=(784,), name="cycle_input") d0 = tf.keras.layers.Dense(64, activation="relu") d1 = tf.keras.layers.Dense(64, activation="relu") @@ -264,38 +264,38 @@ def test_keras_model_to_graph_def_functional_model_with_cycle(self): def test_keras_model_to_graph_def_lstm_model(self): expected_proto = """ - node { - name: "model/lstm_input" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "InputLayer" - } - } - } - node { - name: "model/simple_rnn" - input: "model/lstm_input" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "SimpleRNN" - } - } - } - """ + node { + name: "model/lstm_input" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "InputLayer" + } + } + } + node { + name: "model/simple_rnn" + input: "model/lstm_input" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "SimpleRNN" + } + } + } + """ inputs = tf.keras.layers.Input(shape=(None, 5), name="lstm_input") encoder = tf.keras.layers.SimpleRNN(256) @@ -304,86 +304,86 @@ def test_keras_model_to_graph_def_lstm_model(self): def test_keras_model_to_graph_def_nested_sequential_model(self): expected_proto = """ - node { - name: "sequential_2/sequential_1/sequential/dense" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Dense" - } - } - } - node { - name: 
"sequential_2/sequential_1/sequential/activation" - input: "sequential_2/sequential_1/sequential/dense" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Activation" - } - } - } - node { - name: "sequential_2/sequential_1/my_relu" - input: "sequential_2/sequential_1/sequential/activation" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Activation" - } - } - } - node { - name: "sequential_2/dense_1" - input: "sequential_2/sequential_1/my_relu" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Dense" - } - } - } - node { - name: "sequential_2/activation_1" - input: "sequential_2/dense_1" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Activation" - } - } - } - """ + node { + name: "sequential_2/sequential_1/sequential/dense" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Dense" + } + } + } + node { + name: "sequential_2/sequential_1/sequential/activation" + input: "sequential_2/sequential_1/sequential/dense" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Activation" + } + } + } + node { + name: "sequential_2/sequential_1/my_relu" + input: "sequential_2/sequential_1/sequential/activation" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Activation" + } + } + } + node { + name: "sequential_2/dense_1" + input: "sequential_2/sequential_1/my_relu" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Dense" + } + } + } + node { + name: "sequential_2/activation_1" + input: "sequential_2/dense_1" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Activation" + } + } + } + """ sub_sub_model = tf.keras.models.Sequential( [ tf.keras.layers.Dense(32, input_shape=(784,)), @@ -407,134 +407,134 @@ def test_keras_model_to_graph_def_nested_sequential_model(self): def test_keras_model_to_graph_def_functional_multi_inputs(self): expected_proto = """ - node { - name: "model/main_input" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "keras_class" - value { - s: "InputLayer" - } - } - } - node { - name: "model/embedding" - input: "model/main_input" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Embedding" - } - } - } - node { - name: "model/simple_rnn" - input: "model/embedding" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "SimpleRNN" - } - } - } - node { - name: "model/aux_input" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "InputLayer" - } - } - } - node { - name: "model/concatenate" - input: "model/simple_rnn" - input: "model/aux_input" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Concatenate" - } - } - } - node { - name: "model/dense" - input: "model/concatenate" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Dense" - } - } - } - node { - name: "model/main_output" - input: "model/dense" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Dense" - } 
- } - } - node { - name: "model/aux_output" - input: "model/simple_rnn" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Dense" - } - } - } - """ + node { + name: "model/main_input" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "keras_class" + value { + s: "InputLayer" + } + } + } + node { + name: "model/embedding" + input: "model/main_input" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Embedding" + } + } + } + node { + name: "model/simple_rnn" + input: "model/embedding" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "SimpleRNN" + } + } + } + node { + name: "model/aux_input" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "InputLayer" + } + } + } + node { + name: "model/concatenate" + input: "model/simple_rnn" + input: "model/aux_input" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Concatenate" + } + } + } + node { + name: "model/dense" + input: "model/concatenate" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Dense" + } + } + } + node { + name: "model/main_output" + input: "model/dense" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Dense" + } + } + } + node { + name: "model/aux_output" + input: "model/simple_rnn" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Dense" + } + } + } + """ main_input = tf.keras.layers.Input( shape=(100,), dtype="int32", name="main_input" ) @@ -564,132 +564,132 @@ def test_keras_model_to_graph_def_functional_multi_inputs(self): def test_keras_model_to_graph_def_functional_model_as_layer(self): expected_proto = """ - node { - name: "model_1/sub_func_input_2" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "InputLayer" - } - } - } - node { - name: "model_1/sub_func_input_1" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "InputLayer" - } - } - } - node { - name: "model_1/model/sub_func_input_1" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "InputLayer" - } - } - } - node { - name: "model_1/model/sub_func_input_2" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "InputLayer" - } - } - } - node { - name: "model_1/model/dense" - input: "model_1/model/sub_func_input_1" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Dense" - } - } - } - node { - name: "model_1/model/dense_1" - input: "model_1/model/sub_func_input_2" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Dense" - } - } - } - node { - name: "model_1/concatenate" - input: "model_1/model/dense" - input: "model_1/model/dense_1" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Concatenate" - } - } - } - node { - name: "model_1/dense_2" - input: "model_1/concatenate" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Dense" - } - } - } - """ + node { + name: "model_1/sub_func_input_2" + attr 
{ + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "InputLayer" + } + } + } + node { + name: "model_1/sub_func_input_1" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "InputLayer" + } + } + } + node { + name: "model_1/model/sub_func_input_1" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "InputLayer" + } + } + } + node { + name: "model_1/model/sub_func_input_2" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "InputLayer" + } + } + } + node { + name: "model_1/model/dense" + input: "model_1/model/sub_func_input_1" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Dense" + } + } + } + node { + name: "model_1/model/dense_1" + input: "model_1/model/sub_func_input_2" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Dense" + } + } + } + node { + name: "model_1/concatenate" + input: "model_1/model/dense" + input: "model_1/model/dense_1" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Concatenate" + } + } + } + node { + name: "model_1/dense_2" + input: "model_1/concatenate" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Dense" + } + } + } + """ inputs1 = tf.keras.layers.Input(shape=(784,), name="sub_func_input_1") inputs2 = tf.keras.layers.Input(shape=(784,), name="sub_func_input_2") d0 = tf.keras.layers.Dense(64, activation="relu") @@ -711,70 +711,70 @@ def test_keras_model_to_graph_def_functional_model_as_layer(self): def test_keras_model_to_graph_def_functional_sequential_model(self): expected_proto = """ - node { - name: "model/func_seq_input" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "InputLayer" - } - } - } - node { - name: "model/sequential/dense" - input: "model/func_seq_input" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Dense" - } - } - } - node { - name: "model/sequential/my_relu" - input: "model/sequential/dense" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Activation" - } - } - } - node { - name: "model/dense_1" - input: "model/sequential/my_relu" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Dense" - } - } - } - """ + node { + name: "model/func_seq_input" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "InputLayer" + } + } + } + node { + name: "model/sequential/dense" + input: "model/func_seq_input" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Dense" + } + } + } + node { + name: "model/sequential/my_relu" + input: "model/sequential/dense" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Activation" + } + } + } + node { + name: "model/dense_1" + input: "model/sequential/my_relu" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Dense" + } + } + } + """ inputs = tf.keras.layers.Input(shape=(784,), name="func_seq_input") sub_model = tf.keras.models.Sequential( [ @@ -792,70 +792,70 @@ def 
test_keras_model_to_graph_def_functional_sequential_model(self): def test_keras_model_to_graph_def_sequential_functional_model(self): expected_proto = """ - node { - name: "sequential/model/func_seq_input" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "InputLayer" - } - } - } - node { - name: "sequential/model/dense" - input: "sequential/model/func_seq_input" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Dense" - } - } - } - node { - name: "sequential/dense_1" - input: "sequential/model/dense" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Dense" - } - } - } - node { - name: "sequential/my_relu" - input: "sequential/dense_1" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Activation" - } - } - } - """ + node { + name: "sequential/model/func_seq_input" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "InputLayer" + } + } + } + node { + name: "sequential/model/dense" + input: "sequential/model/func_seq_input" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Dense" + } + } + } + node { + name: "sequential/dense_1" + input: "sequential/model/dense" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Dense" + } + } + } + node { + name: "sequential/my_relu" + input: "sequential/dense_1" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Activation" + } + } + } + """ inputs = tf.keras.layers.Input(shape=(784,), name="func_seq_input") dense = tf.keras.layers.Dense(64, activation="relu") diff --git a/tensorboard/plugins/histogram/histograms_plugin.py b/tensorboard/plugins/histogram/histograms_plugin.py index 201521a30e..b3367d2e56 100644 --- a/tensorboard/plugins/histogram/histograms_plugin.py +++ b/tensorboard/plugins/histogram/histograms_plugin.py @@ -86,12 +86,12 @@ def is_active(self): db = self._db_connection_provider() cursor = db.execute( """ - SELECT - 1 - FROM Tags - WHERE Tags.plugin_name = ? - LIMIT 1 - """, + SELECT + 1 + FROM Tags + WHERE Tags.plugin_name = ? + LIMIT 1 + """, (metadata.PLUGIN_NAME,), ) return bool(list(cursor)) @@ -125,16 +125,16 @@ def index_impl(self, experiment): db = self._db_connection_provider() cursor = db.execute( """ - SELECT - Tags.tag_name, - Tags.display_name, - Runs.run_name - FROM Tags - JOIN Runs - ON Tags.run_id = Runs.run_id - WHERE - Tags.plugin_name = ? - """, + SELECT + Tags.tag_name, + Tags.display_name, + Runs.run_name + FROM Tags + JOIN Runs + ON Tags.run_id = Runs.run_id + WHERE + Tags.plugin_name = ? + """, (metadata.PLUGIN_NAME,), ) result = collections.defaultdict(dict) @@ -213,15 +213,15 @@ def histograms_impl(self, tag, run, experiment, downsample_to=None): # Prefetch the tag ID matching this run and tag. 
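        # (Aside: a rough Python picture of what the downsampling query further
        # below computes — a hypothetical bucket() helper, assuming SQLite's
        # integer division on integer steps, not code from this module:
        #
        #     def bucket(step, min_step, max_step, sample_size):
        #         # Steps with equal bucket values are grouped; MIN(step) then
        #         # keeps the first step of each linearly spaced bucket.
        #         return (sample_size - 1) * (step - min_step) // (max_step - min_step)
        #
        # so at most sample_size rows survive, spread evenly over the step range.)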
cursor.execute( """ - SELECT - tag_id - FROM Tags - JOIN Runs USING (run_id) - WHERE - Runs.run_name = :run - AND Tags.tag_name = :tag - AND Tags.plugin_name = :plugin - """, + SELECT + tag_id + FROM Tags + JOIN Runs USING (run_id) + WHERE + Runs.run_name = :run + AND Tags.tag_name = :tag + AND Tags.plugin_name = :plugin + """, {"run": run, "tag": tag, "plugin": metadata.PLUGIN_NAME}, ) row = cursor.fetchone() @@ -240,30 +240,30 @@ def histograms_impl(self, tag, run, experiment, downsample_to=None): # [s_min + math.ceil(i / k * (s_max - s_min)) for i in range(0, k + 1)] cursor.execute( """ - SELECT - MIN(step) AS step, - computed_time, - data, - dtype, - shape - FROM Tensors - INNER JOIN ( - SELECT - MIN(step) AS min_step, - MAX(step) AS max_step - FROM Tensors - /* Filter out NULL so we can use TensorSeriesStepIndex. */ - WHERE series = :tag_id AND step IS NOT NULL - ) - /* Ensure we omit reserved rows, which have NULL step values. */ - WHERE series = :tag_id AND step IS NOT NULL - /* Bucket rows into sample_size linearly spaced buckets, or do - no sampling if sample_size is NULL. */ - GROUP BY - IFNULL(:sample_size - 1, max_step - min_step) - * (step - min_step) / (max_step - min_step) - ORDER BY step - """, + SELECT + MIN(step) AS step, + computed_time, + data, + dtype, + shape + FROM Tensors + INNER JOIN ( + SELECT + MIN(step) AS min_step, + MAX(step) AS max_step + FROM Tensors + /* Filter out NULL so we can use TensorSeriesStepIndex. */ + WHERE series = :tag_id AND step IS NOT NULL + ) + /* Ensure we omit reserved rows, which have NULL step values. */ + WHERE series = :tag_id AND step IS NOT NULL + /* Bucket rows into sample_size linearly spaced buckets, or do + no sampling if sample_size is NULL. */ + GROUP BY + IFNULL(:sample_size - 1, max_step - min_step) + * (step - min_step) / (max_step - min_step) + ORDER BY step + """, {"tag_id": tag_id, "sample_size": downsample_to}, ) events = [ diff --git a/tensorboard/plugins/hparams/backend_context_test.py b/tensorboard/plugins/hparams/backend_context_test.py index 190a5c68f6..b73c60d3af 100644 --- a/tensorboard/plugins/hparams/backend_context_test.py +++ b/tensorboard/plugins/hparams/backend_context_test.py @@ -99,11 +99,11 @@ def _mock_plugin_run_to_tag_to_content(self, plugin_name): def test_experiment_with_experiment_tag(self): experiment = """ - description: 'Test experiment' - metric_infos: [ - { name: { tag: 'current_temp' } } - ] - """ + description: 'Test experiment' + metric_infos: [ + { name: { tag: 'current_temp' } } + ] + """ self._mock_multiplexer.PluginRunToTagToContent.side_effect = None self._mock_multiplexer.PluginRunToTagToContent.return_value = { "exp": { @@ -117,56 +117,56 @@ def test_experiment_with_experiment_tag(self): def test_experiment_without_experiment_tag(self): self.session_1_start_info_ = """ - hparams:[ - {key: 'batch_size' value: {number_value: 100}}, - {key: 'lr' value: {number_value: 0.01}}, - {key: 'model_type' value: {string_value: 'CNN'}} - ] - """ + hparams:[ + {key: 'batch_size' value: {number_value: 100}}, + {key: 'lr' value: {number_value: 0.01}}, + {key: 'model_type' value: {string_value: 'CNN'}} + ] + """ self.session_2_start_info_ = """ - hparams:[ - {key: 'batch_size' value: {number_value: 200}}, - {key: 'lr' value: {number_value: 0.02}}, - {key: 'model_type' value: {string_value: 'LATTICE'}} - ] - """ + hparams:[ + {key: 'batch_size' value: {number_value: 200}}, + {key: 'lr' value: {number_value: 0.02}}, + {key: 'model_type' value: {string_value: 'LATTICE'}} + ] + """
self.session_3_start_info_ = """ - hparams:[ - {key: 'batch_size' value: {number_value: 300}}, - {key: 'lr' value: {number_value: 0.05}}, - {key: 'model_type' value: {string_value: 'CNN'}} - ] - """ + hparams:[ + {key: 'batch_size' value: {number_value: 300}}, + {key: 'lr' value: {number_value: 0.05}}, + {key: 'model_type' value: {string_value: 'CNN'}} + ] + """ expected_exp = """ - hparam_infos: { - name: 'batch_size' - type: DATA_TYPE_FLOAT64 - }, - hparam_infos: { - name: 'lr' - type: DATA_TYPE_FLOAT64 - }, - hparam_infos: { - name: 'model_type' - type: DATA_TYPE_STRING - domain_discrete: { - values: [{string_value: 'CNN'}, - {string_value: 'LATTICE'}] - } - } - metric_infos: { - name: {group: '', tag: 'accuracy'} - } - metric_infos: { - name: {group: '', tag: 'loss'} - } - metric_infos: { - name: {group: 'eval', tag: 'loss'} - } - metric_infos: { - name: {group: 'train', tag: 'loss'} - } - """ + hparam_infos: { + name: 'batch_size' + type: DATA_TYPE_FLOAT64 + }, + hparam_infos: { + name: 'lr' + type: DATA_TYPE_FLOAT64 + }, + hparam_infos: { + name: 'model_type' + type: DATA_TYPE_STRING + domain_discrete: { + values: [{string_value: 'CNN'}, + {string_value: 'LATTICE'}] + } + } + metric_infos: { + name: {group: '', tag: 'accuracy'} + } + metric_infos: { + name: {group: '', tag: 'loss'} + } + metric_infos: { + name: {group: 'eval', tag: 'loss'} + } + metric_infos: { + name: {group: 'train', tag: 'loss'} + } + """ ctxt = backend_context.Context(self._mock_tb_context) actual_exp = ctxt.experiment() _canonicalize_experiment(actual_exp) @@ -174,61 +174,61 @@ def test_experiment_without_experiment_tag(self): def test_experiment_without_experiment_tag_different_hparam_types(self): self.session_1_start_info_ = """ - hparams:[ - {key: 'batch_size' value: {number_value: 100}}, - {key: 'lr' value: {string_value: '0.01'}} - ] - """ + hparams:[ + {key: 'batch_size' value: {number_value: 100}}, + {key: 'lr' value: {string_value: '0.01'}} + ] + """ self.session_2_start_info_ = """ - hparams:[ - {key: 'lr' value: {number_value: 0.02}}, - {key: 'model_type' value: {string_value: 'LATTICE'}} - ] - """ + hparams:[ + {key: 'lr' value: {number_value: 0.02}}, + {key: 'model_type' value: {string_value: 'LATTICE'}} + ] + """ self.session_3_start_info_ = """ - hparams:[ - {key: 'batch_size' value: {bool_value: true}}, - {key: 'model_type' value: {string_value: 'CNN'}} - ] - """ + hparams:[ + {key: 'batch_size' value: {bool_value: true}}, + {key: 'model_type' value: {string_value: 'CNN'}} + ] + """ expected_exp = """ - hparam_infos: { - name: 'batch_size' - type: DATA_TYPE_STRING - domain_discrete: { - values: [{string_value: '100.0'}, - {string_value: 'true'}] - } - } - hparam_infos: { - name: 'lr' - type: DATA_TYPE_STRING - domain_discrete: { - values: [{string_value: '0.01'}, - {string_value: '0.02'}] - } - } - hparam_infos: { - name: 'model_type' - type: DATA_TYPE_STRING - domain_discrete: { - values: [{string_value: 'CNN'}, - {string_value: 'LATTICE'}] - } - } - metric_infos: { - name: {group: '', tag: 'accuracy'} - } - metric_infos: { - name: {group: '', tag: 'loss'} - } - metric_infos: { - name: {group: 'eval', tag: 'loss'} - } - metric_infos: { - name: {group: 'train', tag: 'loss'} - } - """ + hparam_infos: { + name: 'batch_size' + type: DATA_TYPE_STRING + domain_discrete: { + values: [{string_value: '100.0'}, + {string_value: 'true'}] + } + } + hparam_infos: { + name: 'lr' + type: DATA_TYPE_STRING + domain_discrete: { + values: [{string_value: '0.01'}, + {string_value: '0.02'}] + } + } + hparam_infos: { 
+ name: 'model_type' + type: DATA_TYPE_STRING + domain_discrete: { + values: [{string_value: 'CNN'}, + {string_value: 'LATTICE'}] + } + } + metric_infos: { + name: {group: '', tag: 'accuracy'} + } + metric_infos: { + name: {group: '', tag: 'loss'} + } + metric_infos: { + name: {group: 'eval', tag: 'loss'} + } + metric_infos: { + name: {group: 'train', tag: 'loss'} + } + """ ctxt = backend_context.Context(self._mock_tb_context) actual_exp = ctxt.experiment() _canonicalize_experiment(actual_exp) @@ -236,52 +236,52 @@ def test_experiment_without_experiment_tag_different_hparam_types(self): def test_experiment_without_experiment_tag_many_distinct_values(self): self.session_1_start_info_ = """ - hparams:[ - {key: 'batch_size' value: {number_value: 100}}, - {key: 'lr' value: {string_value: '0.01'}} - ] - """ + hparams:[ + {key: 'batch_size' value: {number_value: 100}}, + {key: 'lr' value: {string_value: '0.01'}} + ] + """ self.session_2_start_info_ = """ - hparams:[ - {key: 'lr' value: {number_value: 0.02}}, - {key: 'model_type' value: {string_value: 'CNN'}} - ] - """ + hparams:[ + {key: 'lr' value: {number_value: 0.02}}, + {key: 'model_type' value: {string_value: 'CNN'}} + ] + """ self.session_3_start_info_ = """ - hparams:[ - {key: 'batch_size' value: {bool_value: true}}, - {key: 'model_type' value: {string_value: 'CNN'}} - ] - """ + hparams:[ + {key: 'batch_size' value: {bool_value: true}}, + {key: 'model_type' value: {string_value: 'CNN'}} + ] + """ expected_exp = """ - hparam_infos: { - name: 'batch_size' - type: DATA_TYPE_STRING - } - hparam_infos: { - name: 'lr' - type: DATA_TYPE_STRING - } - hparam_infos: { - name: 'model_type' - type: DATA_TYPE_STRING - domain_discrete: { - values: [{string_value: 'CNN'}] - } - } - metric_infos: { - name: {group: '', tag: 'accuracy'} - } - metric_infos: { - name: {group: '', tag: 'loss'} - } - metric_infos: { - name: {group: 'eval', tag: 'loss'} - } - metric_infos: { - name: {group: 'train', tag: 'loss'} - } - """ + hparam_infos: { + name: 'batch_size' + type: DATA_TYPE_STRING + } + hparam_infos: { + name: 'lr' + type: DATA_TYPE_STRING + } + hparam_infos: { + name: 'model_type' + type: DATA_TYPE_STRING + domain_discrete: { + values: [{string_value: 'CNN'}] + } + } + metric_infos: { + name: {group: '', tag: 'accuracy'} + } + metric_infos: { + name: {group: '', tag: 'loss'} + } + metric_infos: { + name: {group: 'eval', tag: 'loss'} + } + metric_infos: { + name: {group: 'train', tag: 'loss'} + } + """ ctxt = backend_context.Context( self._mock_tb_context, max_domain_discrete_len=1 ) diff --git a/tensorboard/plugins/hparams/keras_test.py b/tensorboard/plugins/hparams/keras_test.py index 152611eed7..d7ec6598a9 100644 --- a/tensorboard/plugins/hparams/keras_test.py +++ b/tensorboard/plugins/hparams/keras_test.py @@ -110,21 +110,21 @@ def mock_time(): expected_start_pb = plugin_data_pb2.SessionStartInfo() text_format.Merge( """ - start_time_secs: 1234.5 - group_name: "my_trial" - hparams { - key: "optimizer" - value { - string_value: "adam" - } - } - hparams { - key: "dense_neurons" - value { - number_value: 8.0 - } - } - """, + start_time_secs: 1234.5 + group_name: "my_trial" + hparams { + key: "optimizer" + value { + string_value: "adam" + } + } + hparams { + key: "dense_neurons" + value { + number_value: 8.0 + } + } + """, expected_start_pb, ) self.assertEqual(start_pb, expected_start_pb) @@ -132,9 +132,9 @@ def mock_time(): expected_end_pb = plugin_data_pb2.SessionEndInfo() text_format.Merge( """ - end_time_secs: 6789.0 - status: STATUS_SUCCESS - """, + 
end_time_secs: 6789.0 - status: STATUS_SUCCESS - """, + end_time_secs: 6789.0 +
end_time_secs: 6789.0 + status: STATUS_SUCCESS + """, expected_end_pb, ) self.assertEqual(end_pb, expected_end_pb) diff --git a/tensorboard/plugins/hparams/list_metric_evals_test.py b/tensorboard/plugins/hparams/list_metric_evals_test.py index da14f7dea5..8f3d489832 100644 --- a/tensorboard/plugins/hparams/list_metric_evals_test.py +++ b/tensorboard/plugins/hparams/list_metric_evals_test.py @@ -57,11 +57,13 @@ def _run_handler(self, request): def test_run(self): result = self._run_handler( - """session_name: '/this/is/a/session' - metric_name: { - tag: 'metric_tag' - group: 'metric_group' - }""" + """ + session_name: '/this/is/a/session' + metric_name: { + tag: 'metric_tag' + group: 'metric_group' + } + """ ) self.assertEqual([(1, 1, 1.0), (2, 2, 2.0), (3, 3, 3.0)], result) diff --git a/tensorboard/plugins/hparams/list_session_groups_test.py b/tensorboard/plugins/hparams/list_session_groups_test.py index 800ad4d259..8c58e5bde6 100644 --- a/tensorboard/plugins/hparams/list_session_groups_test.py +++ b/tensorboard/plugins/hparams/list_session_groups_test.py @@ -63,140 +63,140 @@ def setUp(self): metadata.EXPERIMENT_TAG: self._serialized_plugin_data( DATA_TYPE_EXPERIMENT, """ - description: 'Test experiment' - user: 'Test user' - hparam_infos: [ - { - name: 'initial_temp' - type: DATA_TYPE_FLOAT64 - }, - { - name: 'final_temp' - type: DATA_TYPE_FLOAT64 - }, - { name: 'string_hparam' }, - { name: 'bool_hparam' }, - { name: 'optional_string_hparam' } - ] - metric_infos: [ - { name: { tag: 'current_temp' } }, - { name: { tag: 'delta_temp' } }, - { name: { tag: 'optional_metric' } } - ] - """, + description: 'Test experiment' + user: 'Test user' + hparam_infos: [ + { + name: 'initial_temp' + type: DATA_TYPE_FLOAT64 + }, + { + name: 'final_temp' + type: DATA_TYPE_FLOAT64 + }, + { name: 'string_hparam' }, + { name: 'bool_hparam' }, + { name: 'optional_string_hparam' } + ] + metric_infos: [ + { name: { tag: 'current_temp' } }, + { name: { tag: 'delta_temp' } }, + { name: { tag: 'optional_metric' } } + ] + """, ) }, "session_1": { metadata.SESSION_START_INFO_TAG: self._serialized_plugin_data( DATA_TYPE_SESSION_START_INFO, """ - hparams:{ key: 'initial_temp' value: { number_value: 270 } }, - hparams:{ key: 'final_temp' value: { number_value: 150 } }, - hparams:{ - key: 'string_hparam' value: { string_value: 'a string' } - }, - hparams:{ key: 'bool_hparam' value: { bool_value: true } } - group_name: 'group_1' - start_time_secs: 314159 - """, + hparams:{ key: 'initial_temp' value: { number_value: 270 } }, + hparams:{ key: 'final_temp' value: { number_value: 150 } }, + hparams:{ + key: 'string_hparam' value: { string_value: 'a string' } + }, + hparams:{ key: 'bool_hparam' value: { bool_value: true } } + group_name: 'group_1' + start_time_secs: 314159 + """, ), metadata.SESSION_END_INFO_TAG: self._serialized_plugin_data( DATA_TYPE_SESSION_END_INFO, """ - status: STATUS_SUCCESS - end_time_secs: 314164 - """, + status: STATUS_SUCCESS + end_time_secs: 314164 + """, ), }, "session_2": { metadata.SESSION_START_INFO_TAG: self._serialized_plugin_data( DATA_TYPE_SESSION_START_INFO, """ - hparams:{ key: 'initial_temp' value: { number_value: 280 } }, - hparams:{ key: 'final_temp' value: { number_value: 100 } }, - hparams:{ - key: 'string_hparam' value: { string_value: 'AAAAA' } - }, - hparams:{ key: 'bool_hparam' value: { bool_value: false } } - group_name: 'group_2' - start_time_secs: 314159 - """, + hparams:{ key: 'initial_temp' value: { number_value: 280 } }, + hparams:{ key: 'final_temp' value: { number_value: 
100 } }, + hparams:{ + key: 'string_hparam' value: { string_value: 'AAAAA' } + }, + hparams:{ key: 'bool_hparam' value: { bool_value: false } } + group_name: 'group_2' + start_time_secs: 314159 + """, ), metadata.SESSION_END_INFO_TAG: self._serialized_plugin_data( DATA_TYPE_SESSION_END_INFO, """ - status: STATUS_SUCCESS - end_time_secs: 314164 - """, + status: STATUS_SUCCESS + end_time_secs: 314164 + """, ), }, "session_3": { metadata.SESSION_START_INFO_TAG: self._serialized_plugin_data( DATA_TYPE_SESSION_START_INFO, """ - hparams:{ key: 'initial_temp' value: { number_value: 280 } }, - hparams:{ key: 'final_temp' value: { number_value: 100 } }, - hparams:{ - key: 'string_hparam' value: { string_value: 'AAAAA' } - }, - hparams:{ key: 'bool_hparam' value: { bool_value: false } } - group_name: 'group_2' - start_time_secs: 314159 - """, + hparams:{ key: 'initial_temp' value: { number_value: 280 } }, + hparams:{ key: 'final_temp' value: { number_value: 100 } }, + hparams:{ + key: 'string_hparam' value: { string_value: 'AAAAA' } + }, + hparams:{ key: 'bool_hparam' value: { bool_value: false } } + group_name: 'group_2' + start_time_secs: 314159 + """, ), metadata.SESSION_END_INFO_TAG: self._serialized_plugin_data( DATA_TYPE_SESSION_END_INFO, """ - status: STATUS_FAILURE - end_time_secs: 314164 - """, + status: STATUS_FAILURE + end_time_secs: 314164 + """, ), }, "session_4": { metadata.SESSION_START_INFO_TAG: self._serialized_plugin_data( DATA_TYPE_SESSION_START_INFO, """ - hparams:{ key: 'initial_temp' value: { number_value: 300 } }, - hparams:{ key: 'final_temp' value: { number_value: 120 } }, - hparams:{ - key: 'string_hparam' value: { string_value: 'a string_3' } - }, - hparams:{ key: 'bool_hparam' value: { bool_value: true } } - hparams:{ - key: 'optional_string_hparam' value { string_value: 'BB' } - }, - group_name: 'group_3' - start_time_secs: 314159 - """, + hparams:{ key: 'initial_temp' value: { number_value: 300 } }, + hparams:{ key: 'final_temp' value: { number_value: 120 } }, + hparams:{ + key: 'string_hparam' value: { string_value: 'a string_3' } + }, + hparams:{ key: 'bool_hparam' value: { bool_value: true } } + hparams:{ + key: 'optional_string_hparam' value { string_value: 'BB' } + }, + group_name: 'group_3' + start_time_secs: 314159 + """, ), metadata.SESSION_END_INFO_TAG: self._serialized_plugin_data( DATA_TYPE_SESSION_END_INFO, """ - status: STATUS_UNKNOWN - end_time_secs: 314164 - """, + status: STATUS_UNKNOWN + end_time_secs: 314164 + """, ), }, "session_5": { metadata.SESSION_START_INFO_TAG: self._serialized_plugin_data( DATA_TYPE_SESSION_START_INFO, """ - hparams:{ key: 'initial_temp' value: { number_value: 280 } }, - hparams:{ key: 'final_temp' value: { number_value: 100 } }, - hparams:{ - key: 'string_hparam' value: { string_value: 'AAAAA' } - }, - hparams:{ key: 'bool_hparam' value: { bool_value: false } } - group_name: 'group_2' - start_time_secs: 314159 - """, + hparams:{ key: 'initial_temp' value: { number_value: 280 } }, + hparams:{ key: 'final_temp' value: { number_value: 100 } }, + hparams:{ + key: 'string_hparam' value: { string_value: 'AAAAA' } + }, + hparams:{ key: 'bool_hparam' value: { bool_value: false } } + group_name: 'group_2' + start_time_secs: 314159 + """, ), metadata.SESSION_END_INFO_TAG: self._serialized_plugin_data( DATA_TYPE_SESSION_END_INFO, """ - status: STATUS_SUCCESS - end_time_secs: 314164 - """, + status: STATUS_SUCCESS + end_time_secs: 314164 + """, ), }, } @@ -331,191 +331,193 @@ def test_empty_request(self): def test_no_filter_no_sort(self): 
request = """ - start_index: 0 - slice_size: 3 - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - aggregation_type: AGGREGATION_AVG - """ + start_index: 0 + slice_size: 3 + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + aggregation_type: AGGREGATION_AVG + """ response = self._run_handler(request) self.assertProtoEquals( """ - session_groups { - name: "group_1" - hparams { key: "bool_hparam" value { bool_value: true } } - hparams { key: "final_temp" value { number_value: 150.0 } } - hparams { key: "initial_temp" value { number_value: 270.0 } } - hparams { key: "string_hparam" value { string_value: "a string" } } - metric_values { - name { tag: "current_temp" } - value: 10 - training_step: 1 - wall_time_secs: 1.0 - } - metric_values { name { tag: "delta_temp" } value: 15 - training_step: 2 - wall_time_secs: 10.0 - } - metric_values { name { tag: "optional_metric" } value: 33 - training_step: 20 - wall_time_secs: 2.0 - } - sessions { - name: "session_1" - start_time_secs: 314159 - end_time_secs: 314164 - status: STATUS_SUCCESS - metric_values { - name { tag: "current_temp" } - value: 10 - training_step: 1 - wall_time_secs: 1.0 - } - metric_values { - name { tag: "delta_temp" } - value: 15 - training_step: 2 - wall_time_secs: 10.0 - } + session_groups { + name: "group_1" + hparams { key: "bool_hparam" value { bool_value: true } } + hparams { key: "final_temp" value { number_value: 150.0 } } + hparams { key: "initial_temp" value { number_value: 270.0 } } + hparams { key: "string_hparam" value { string_value: "a string" } } + metric_values { + name { tag: "current_temp" } + value: 10 + training_step: 1 + wall_time_secs: 1.0 + } + metric_values { name { tag: "delta_temp" } value: 15 + training_step: 2 + wall_time_secs: 10.0 + } + metric_values { name { tag: "optional_metric" } value: 33 + training_step: 20 + wall_time_secs: 2.0 + } + sessions { + name: "session_1" + start_time_secs: 314159 + end_time_secs: 314164 + status: STATUS_SUCCESS + metric_values { + name { tag: "current_temp" } + value: 10 + training_step: 1 + wall_time_secs: 1.0 + } + metric_values { + name { tag: "delta_temp" } + value: 15 + training_step: 2 + wall_time_secs: 10.0 + } - metric_values { - name { tag: "optional_metric" } - value: 33 - training_step: 20 - wall_time_secs: 2.0 - } - } - } - session_groups { - name: "group_2" - hparams { key: "bool_hparam" value { bool_value: false } } - hparams { key: "final_temp" value { number_value: 100.0 } } - hparams { key: "initial_temp" value { number_value: 280.0 } } - hparams { key: "string_hparam" value { string_value: "AAAAA"}} - metric_values { - name { tag: "current_temp" } - value: 51.0 - training_step: 1 - wall_time_secs: 1.0 - } - metric_values { - name { tag: "delta_temp" } - value: 44.5 - training_step: 2 - wall_time_secs: 10.3333333 - } - sessions { - name: "session_2" - start_time_secs: 314159 - end_time_secs: 314164 - status: STATUS_SUCCESS - metric_values { - name { tag: "current_temp" } - value: 100 - training_step: 1 - wall_time_secs: 1.0 - } - metric_values { name { tag: "delta_temp" } - value: 150 - training_step: 3 - wall_time_secs: 11.0 - } - } - sessions { - name: "session_3" - start_time_secs: 314159 - end_time_secs: 314164 - status: STATUS_FAILURE - metric_values { - name { tag: "current_temp" } - value: 1.0 - training_step: 1 - wall_time_secs: 1.0 - } - metric_values { name { tag: "delta_temp" } - value: 1.5 - training_step: 2 - wall_time_secs: 10.0 + metric_values { + 
name { tag: "optional_metric" } + value: 33 + training_step: 20 + wall_time_secs: 2.0 + } + } } - } - sessions { - name: "session_5" - start_time_secs: 314159 - end_time_secs: 314164 - status: STATUS_SUCCESS - metric_values { - name { tag: "current_temp" } - value: 52.0 - training_step: 1 - wall_time_secs: 1.0 - } - metric_values { name { tag: "delta_temp" } - value: -18 - training_step: 2 - wall_time_secs: 10.0 - } - } - } - session_groups { - name: "group_3" - hparams { key: "bool_hparam" value { bool_value: true } } - hparams { key: "final_temp" value { number_value: 120.0 } } - hparams { key: "initial_temp" value { number_value: 300.0 } } - hparams { key: "string_hparam" value { string_value: "a string_3"}} - hparams { - key: 'optional_string_hparam' value { string_value: 'BB' } - } - metric_values { - name { tag: "current_temp" } - value: 101.0 - training_step: 1 - wall_time_secs: 1.0 - } - metric_values { name { tag: "delta_temp" } value: -151.0 - training_step: 2 - wall_time_secs: 10.0 - } - sessions { - name: "session_4" - start_time_secs: 314159 - end_time_secs: 314164 - status: STATUS_UNKNOWN - metric_values { - name { tag: "current_temp" } - value: 101.0 - training_step: 1 - wall_time_secs: 1.0 + session_groups { + name: "group_2" + hparams { key: "bool_hparam" value { bool_value: false } } + hparams { key: "final_temp" value { number_value: 100.0 } } + hparams { key: "initial_temp" value { number_value: 280.0 } } + hparams { key: "string_hparam" value { string_value: "AAAAA"}} + metric_values { + name { tag: "current_temp" } + value: 51.0 + training_step: 1 + wall_time_secs: 1.0 + } + metric_values { + name { tag: "delta_temp" } + value: 44.5 + training_step: 2 + wall_time_secs: 10.3333333 + } + sessions { + name: "session_2" + start_time_secs: 314159 + end_time_secs: 314164 + status: STATUS_SUCCESS + metric_values { + name { tag: "current_temp" } + value: 100 + training_step: 1 + wall_time_secs: 1.0 + } + metric_values { name { tag: "delta_temp" } + value: 150 + training_step: 3 + wall_time_secs: 11.0 + } + } + sessions { + name: "session_3" + start_time_secs: 314159 + end_time_secs: 314164 + status: STATUS_FAILURE + metric_values { + name { tag: "current_temp" } + value: 1.0 + training_step: 1 + wall_time_secs: 1.0 + } + metric_values { name { tag: "delta_temp" } + value: 1.5 + training_step: 2 + wall_time_secs: 10.0 + } + } + sessions { + name: "session_5" + start_time_secs: 314159 + end_time_secs: 314164 + status: STATUS_SUCCESS + metric_values { + name { tag: "current_temp" } + value: 52.0 + training_step: 1 + wall_time_secs: 1.0 + } + metric_values { name { tag: "delta_temp" } + value: -18 + training_step: 2 + wall_time_secs: 10.0 + } + } } - metric_values { name { tag: "delta_temp" } value: -151.0 - training_step: 2 - wall_time_secs: 10.0 + session_groups { + name: "group_3" + hparams { key: "bool_hparam" value { bool_value: true } } + hparams { key: "final_temp" value { number_value: 120.0 } } + hparams { key: "initial_temp" value { number_value: 300.0 } } + hparams { key: "string_hparam" value { string_value: "a string_3"}} + hparams { + key: 'optional_string_hparam' value { string_value: 'BB' } + } + metric_values { + name { tag: "current_temp" } + value: 101.0 + training_step: 1 + wall_time_secs: 1.0 + } + metric_values { name { tag: "delta_temp" } value: -151.0 + training_step: 2 + wall_time_secs: 10.0 + } + sessions { + name: "session_4" + start_time_secs: 314159 + end_time_secs: 314164 + status: STATUS_UNKNOWN + metric_values { + name { tag: "current_temp" } + 
value: 101.0 + training_step: 1 + wall_time_secs: 1.0 + } + metric_values { name { tag: "delta_temp" } value: -151.0 + training_step: 2 + wall_time_secs: 10.0 + } + } } - } - } - total_size: 3 - """, + total_size: 3 + """, response, ) def test_no_allowed_statuses(self): request = """ - start_index: 0 - slice_size: 3 - allowed_statuses: [] - aggregation_type: AGGREGATION_AVG - """ + start_index: 0 + slice_size: 3 + allowed_statuses: [] + aggregation_type: AGGREGATION_AVG + """ response = self._run_handler(request) self.assertEquals(len(response.session_groups), 0) def test_some_allowed_statuses(self): request = """ - start_index: 0 - slice_size: 3 - allowed_statuses: [STATUS_UNKNOWN, STATUS_SUCCESS] - aggregation_type: AGGREGATION_AVG - """ + start_index: 0 + slice_size: 3 + allowed_statuses: [STATUS_UNKNOWN, STATUS_SUCCESS] + aggregation_type: AGGREGATION_AVG + """ response = self._run_handler(request) self.assertEquals( _reduce_to_names(response.session_groups), @@ -528,11 +530,11 @@ def test_some_allowed_statuses(self): def test_some_allowed_statuses_empty_groups(self): request = """ - start_index: 0 - slice_size: 3 - allowed_statuses: [STATUS_FAILURE] - aggregation_type: AGGREGATION_AVG - """ + start_index: 0 + slice_size: 3 + allowed_statuses: [STATUS_FAILURE] + aggregation_type: AGGREGATION_AVG + """ response = self._run_handler(request) self.assertEquals( _reduce_to_names(response.session_groups), @@ -541,182 +543,220 @@ def test_some_allowed_statuses_empty_groups(self): def test_aggregation_median_current_temp(self): request = """ - start_index: 0 - slice_size: 3 - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - aggregation_type: AGGREGATION_MEDIAN - aggregation_metric: { tag: "current_temp" } - """ + start_index: 0 + slice_size: 3 + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + aggregation_type: AGGREGATION_MEDIAN + aggregation_metric: { tag: "current_temp" } + """ response = self._run_handler(request) self.assertEquals(len(response.session_groups[1].metric_values), 2) self.assertProtoEquals( - """name { tag: "current_temp" } - value: 52.0 - training_step: 1 - wall_time_secs: 1.0""", + """ + name { tag: "current_temp" } + value: 52.0 + training_step: 1 + wall_time_secs: 1.0 + """, response.session_groups[1].metric_values[0], ) self.assertProtoEquals( - """name { tag: "delta_temp" } - value: -18.0 - training_step: 2 - wall_time_secs: 10.0""", + """ + name { tag: "delta_temp" } + value: -18.0 + training_step: 2 + wall_time_secs: 10.0 + """, response.session_groups[1].metric_values[1], ) def test_aggregation_median_delta_temp(self): request = """ - start_index: 0 - slice_size: 3 - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - aggregation_type: AGGREGATION_MEDIAN - aggregation_metric: { tag: "delta_temp" } - """ + start_index: 0 + slice_size: 3 + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + aggregation_type: AGGREGATION_MEDIAN + aggregation_metric: { tag: "delta_temp" } + """ response = self._run_handler(request) self.assertEquals(len(response.session_groups[1].metric_values), 2) self.assertProtoEquals( - """name { tag: "current_temp" } - value: 1.0 - training_step: 1 - wall_time_secs: 1.0""", + """ + name { tag: "current_temp" } + value: 1.0 + training_step: 1 + wall_time_secs: 1.0 + """, response.session_groups[1].metric_values[0], ) self.assertProtoEquals( - """name { tag: "delta_temp" } - 
value: 1.5 - training_step: 2 - wall_time_secs: 10.0""", + """ + name { tag: "delta_temp" } + value: 1.5 + training_step: 2 + wall_time_secs: 10.0 + """, response.session_groups[1].metric_values[1], ) def test_aggregation_max_current_temp(self): request = """ - start_index: 0 - slice_size: 3 - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - aggregation_type: AGGREGATION_MAX - aggregation_metric: { tag: "current_temp" } - """ + start_index: 0 + slice_size: 3 + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + aggregation_type: AGGREGATION_MAX + aggregation_metric: { tag: "current_temp" } + """ response = self._run_handler(request) self.assertEquals(len(response.session_groups[1].metric_values), 2) self.assertProtoEquals( - """name { tag: "current_temp" } - value: 100 - training_step: 1 - wall_time_secs: 1.0""", + """ + name { tag: "current_temp" } + value: 100 + training_step: 1 + wall_time_secs: 1.0 + """, response.session_groups[1].metric_values[0], ) self.assertProtoEquals( - """name { tag: "delta_temp" } - value: 150.0 - training_step: 3 - wall_time_secs: 11.0""", + """ + name { tag: "delta_temp" } + value: 150.0 + training_step: 3 + wall_time_secs: 11.0 + """, response.session_groups[1].metric_values[1], ) def test_aggregation_max_delta_temp(self): request = """ - start_index: 0 - slice_size: 3 - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - aggregation_type: AGGREGATION_MAX - aggregation_metric: { tag: "delta_temp" } - """ + start_index: 0 + slice_size: 3 + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + aggregation_type: AGGREGATION_MAX + aggregation_metric: { tag: "delta_temp" } + """ response = self._run_handler(request) self.assertEquals(len(response.session_groups[1].metric_values), 2) self.assertProtoEquals( - """name { tag: "current_temp" } - value: 100.0 - training_step: 1 - wall_time_secs: 1.0""", + """ + name { tag: "current_temp" } + value: 100.0 + training_step: 1 + wall_time_secs: 1.0 + """, response.session_groups[1].metric_values[0], ) self.assertProtoEquals( - """name { tag: "delta_temp" } - value: 150.0 - training_step: 3 - wall_time_secs: 11.0""", + """ + name { tag: "delta_temp" } + value: 150.0 + training_step: 3 + wall_time_secs: 11.0 + """, response.session_groups[1].metric_values[1], ) def test_aggregation_min_current_temp(self): request = """ - start_index: 0 - slice_size: 3 - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - aggregation_type: AGGREGATION_MIN - aggregation_metric: { tag: "current_temp" } - """ + start_index: 0 + slice_size: 3 + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + aggregation_type: AGGREGATION_MIN + aggregation_metric: { tag: "current_temp" } + """ response = self._run_handler(request) self.assertEquals(len(response.session_groups[1].metric_values), 2) self.assertProtoEquals( - """name { tag: "current_temp" } - value: 1.0 - training_step: 1 - wall_time_secs: 1.0""", + """ + name { tag: "current_temp" } + value: 1.0 + training_step: 1 + wall_time_secs: 1.0 + """, response.session_groups[1].metric_values[0], ) self.assertProtoEquals( - """name { tag: "delta_temp" } - value: 1.5 - training_step: 2 - wall_time_secs: 10.0""", + """ + name { tag: "delta_temp" } + value: 1.5 + training_step: 2 + wall_time_secs: 10.0 + """, response.session_groups[1].metric_values[1], ) 
def test_aggregation_min_delta_temp(self): request = """ - start_index: 0 - slice_size: 3 - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - aggregation_type: AGGREGATION_MIN - aggregation_metric: { tag: "delta_temp" } - """ + start_index: 0 + slice_size: 3 + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + aggregation_type: AGGREGATION_MIN + aggregation_metric: { tag: "delta_temp" } + """ response = self._run_handler(request) self.assertEquals(len(response.session_groups[1].metric_values), 2) self.assertProtoEquals( - """name { tag: "current_temp" } - value: 52.0 - training_step: 1 - wall_time_secs: 1.0""", + """ + name { tag: "current_temp" } + value: 52.0 + training_step: 1 + wall_time_secs: 1.0 + """, response.session_groups[1].metric_values[0], ) self.assertProtoEquals( - """name { tag: "delta_temp" } - value: -18.0 - training_step: 2 - wall_time_secs: 10.0""", + """ + name { tag: "delta_temp" } + value: -18.0 + training_step: 2 + wall_time_secs: 10.0 + """, response.session_groups[1].metric_values[1], ) def test_no_filter_no_sort_partial_slice(self): self._verify_handler( request=""" - start_index: 1 - slice_size: 1 - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - """, + start_index: 1 + slice_size: 1 + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + """, expected_session_group_names=["group_2"], expected_total_size=3, ) @@ -724,17 +764,19 @@ def test_no_filter_no_sort_partial_slice(self): def test_no_filter_exclude_missing_values(self): self._verify_handler( request=""" - col_params: { - metric: { tag: 'optional_metric' } - exclude_missing_values: true - } - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - start_index: 0 - slice_size: 3 - """, + col_params: { + metric: { tag: 'optional_metric' } + exclude_missing_values: true + } + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + start_index: 0 + slice_size: 3 + """, expected_session_group_names=["group_1"], expected_total_size=1, ) @@ -742,34 +784,38 @@ def test_no_filter_exclude_missing_values(self): def test_filter_regexp(self): self._verify_handler( request=""" - col_params: { - hparam: 'string_hparam' - filter_regexp: 'AA' - } - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - start_index: 0 - slice_size: 3 - """, + col_params: { + hparam: 'string_hparam' + filter_regexp: 'AA' + } + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + start_index: 0 + slice_size: 3 + """, expected_session_group_names=["group_2"], expected_total_size=1, ) # Test filtering out all session groups. 
self._verify_handler( request=""" - col_params: { - hparam: 'string_hparam' - filter_regexp: 'a string_100' - } - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - start_index: 0 - slice_size: 3 - """, + col_params: { + hparam: 'string_hparam' + filter_regexp: 'a string_100' + } + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + start_index: 0 + slice_size: 3 + """, expected_session_group_names=[], expected_total_size=0, ) @@ -777,17 +823,19 @@ def test_filter_regexp(self): def test_filter_interval(self): self._verify_handler( request=""" - col_params: { - hparam: 'initial_temp' - filter_interval: { min_value: 270 max_value: 282 } - } - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - start_index: 0 - slice_size: 3 - """, + col_params: { + hparam: 'initial_temp' + filter_interval: { min_value: 270 max_value: 282 } + } + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + start_index: 0 + slice_size: 3 + """, expected_session_group_names=["group_1", "group_2"], expected_total_size=2, ) @@ -795,18 +843,20 @@ def test_filter_interval(self): def test_filter_discrete_set(self): self._verify_handler( request=""" - col_params: { - metric: { tag: 'current_temp' } - filter_discrete: { values: [{ number_value: 101.0 }, - { number_value: 10.0 }] } - } - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - start_index: 0 - slice_size: 3 - """, + col_params: { + metric: { tag: 'current_temp' } + filter_discrete: { values: [{ number_value: 101.0 }, + { number_value: 10.0 }] } + } + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + start_index: 0 + slice_size: 3 + """, expected_session_group_names=["group_1", "group_3"], expected_total_size=2, ) @@ -814,22 +864,24 @@ def test_filter_discrete_set(self): def test_filter_multiple_columns(self): self._verify_handler( request=""" - col_params: { - metric: { tag: 'current_temp' } - filter_discrete: { values: [{ number_value: 101.0 }, - { number_value: 10.0 }] } - } - col_params: { - hparam: 'initial_temp' - filter_interval: { min_value: 270 max_value: 282 } - } - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - start_index: 0 - slice_size: 3 - """, + col_params: { + metric: { tag: 'current_temp' } + filter_discrete: { values: [{ number_value: 101.0 }, + { number_value: 10.0 }] } + } + col_params: { + hparam: 'initial_temp' + filter_interval: { min_value: 270 max_value: 282 } + } + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + start_index: 0 + slice_size: 3 + """, expected_session_group_names=["group_1"], expected_total_size=1, ) @@ -837,53 +889,59 @@ def test_filter_multiple_columns(self): def test_filter_single_column_with_missing_values(self): self._verify_handler( request=""" - col_params: { - hparam: 'optional_string_hparam' - filter_regexp: 'B' - exclude_missing_values: true - } - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - start_index: 0 - slice_size: 3 - """, + col_params: { + hparam: 'optional_string_hparam' + filter_regexp: 'B' + exclude_missing_values: true + } + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + start_index: 0 + slice_size: 3 + """, expected_session_group_names=["group_3"], 
expected_total_size=1, ) self._verify_handler( request=""" - col_params: { - hparam: 'optional_string_hparam' - filter_regexp: 'B' - exclude_missing_values: false - } - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - start_index: 0 - slice_size: 3 - """, + col_params: { + hparam: 'optional_string_hparam' + filter_regexp: 'B' + exclude_missing_values: false + } + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + start_index: 0 + slice_size: 3 + """, expected_session_group_names=["group_1", "group_2", "group_3"], expected_total_size=3, ) self._verify_handler( request=""" - col_params: { - metric: { tag: 'optional_metric' } - filter_discrete: { values: { number_value: 33.0 } } - exclude_missing_values: true - } - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - start_index: 0 - slice_size: 3 - """, + col_params: { + metric: { tag: 'optional_metric' } + filter_discrete: { values: { number_value: 33.0 } } + exclude_missing_values: true + } + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + start_index: 0 + slice_size: 3 + """, expected_session_group_names=["group_1"], expected_total_size=1, ) @@ -891,50 +949,56 @@ def test_filter_single_column_with_missing_values(self): def test_sort_one_column(self): self._verify_handler( request=""" - col_params: { - metric: { tag: 'delta_temp' } - order: ORDER_ASC - } - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - start_index: 0 - slice_size: 3 - """, + col_params: { + metric: { tag: 'delta_temp' } + order: ORDER_ASC + } + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + start_index: 0 + slice_size: 3 + """, expected_session_group_names=["group_3", "group_1", "group_2"], expected_total_size=3, ) self._verify_handler( request=""" - col_params: { - hparam: 'string_hparam' - order: ORDER_ASC - } - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - start_index: 0 - slice_size: 3 - """, + col_params: { + hparam: 'string_hparam' + order: ORDER_ASC + } + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + start_index: 0 + slice_size: 3 + """, expected_session_group_names=["group_2", "group_1", "group_3"], expected_total_size=3, ) # Test descending order. 
self._verify_handler( request=""" - col_params: { - hparam: 'string_hparam' - order: ORDER_DESC - } - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - start_index: 0 - slice_size: 3 - """, + col_params: { + hparam: 'string_hparam' + order: ORDER_DESC + } + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + start_index: 0 + slice_size: 3 + """, expected_session_group_names=["group_3", "group_1", "group_2"], expected_total_size=3, ) @@ -942,42 +1006,46 @@ def test_sort_one_column(self): def test_sort_multiple_columns(self): self._verify_handler( request=""" - col_params: { - hparam: 'bool_hparam' - order: ORDER_ASC - } - col_params: { - metric: { tag: 'delta_temp' } - order: ORDER_ASC - } - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - start_index: 0 - slice_size: 3 - """, + col_params: { + hparam: 'bool_hparam' + order: ORDER_ASC + } + col_params: { + metric: { tag: 'delta_temp' } + order: ORDER_ASC + } + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + start_index: 0 + slice_size: 3 + """, expected_session_group_names=["group_2", "group_3", "group_1"], expected_total_size=3, ) # Primary key in descending order. Secondary key in ascending order. self._verify_handler( request=""" - col_params: { - hparam: 'bool_hparam' - order: ORDER_DESC - } - col_params: { - metric: { tag: 'delta_temp' } - order: ORDER_ASC - } - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - start_index: 0 - slice_size: 3 - """, + col_params: { + hparam: 'bool_hparam' + order: ORDER_DESC + } + col_params: { + metric: { tag: 'delta_temp' } + order: ORDER_ASC + } + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + start_index: 0 + slice_size: 3 + """, expected_session_group_names=["group_3", "group_1", "group_2"], expected_total_size=3, ) @@ -985,69 +1053,77 @@ def test_sort_multiple_columns(self): def test_sort_one_column_with_missing_values(self): self._verify_handler( request=""" - col_params: { - metric: { tag: 'optional_metric' } - order: ORDER_ASC - missing_values_first: false - } - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - start_index: 0 - slice_size: 3 - """, + col_params: { + metric: { tag: 'optional_metric' } + order: ORDER_ASC + missing_values_first: false + } + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + start_index: 0 + slice_size: 3 + """, expected_session_group_names=["group_1", "group_2", "group_3"], expected_total_size=3, ) self._verify_handler( request=""" - col_params: { - metric: { tag: 'optional_metric' } - order: ORDER_ASC - missing_values_first: true - } - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - start_index: 0 - slice_size: 3 - """, + col_params: { + metric: { tag: 'optional_metric' } + order: ORDER_ASC + missing_values_first: true + } + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + start_index: 0 + slice_size: 3 + """, expected_session_group_names=["group_2", "group_3", "group_1"], expected_total_size=3, ) self._verify_handler( request=""" - col_params: { - hparam: 'optional_string_hparam' - order: ORDER_ASC - missing_values_first: false - } - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - 
start_index: 0 - slice_size: 3 - """, + col_params: { + hparam: 'optional_string_hparam' + order: ORDER_ASC + missing_values_first: false + } + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + start_index: 0 + slice_size: 3 + """, expected_session_group_names=["group_3", "group_1", "group_2"], expected_total_size=3, ) self._verify_handler( request=""" - col_params: { - hparam: 'optional_string_hparam' - order: ORDER_ASC - missing_values_first: true - } - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - start_index: 0 - slice_size: 3 - """, + col_params: { + hparam: 'optional_string_hparam' + order: ORDER_ASC + missing_values_first: true + } + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + start_index: 0 + slice_size: 3 + """, expected_session_group_names=["group_1", "group_2", "group_3"], expected_total_size=3, ) diff --git a/tensorboard/plugins/hparams/summary_v2.py b/tensorboard/plugins/hparams/summary_v2.py index 3f85ac57b2..19a4421e0d 100644 --- a/tensorboard/plugins/hparams/summary_v2.py +++ b/tensorboard/plugins/hparams/summary_v2.py @@ -538,21 +538,22 @@ def __init__( dataset_type=None, ): """ - Args: - tag: The tag name of the scalar summary that corresponds to this - metric (as a `str`). - group: An optional string listing the subdirectory under the - session's log directory containing summaries for this metric. - For instance, if summaries for training runs are written to - events files in `ROOT_LOGDIR/SESSION_ID/train`, then `group` - should be `"train"`. Defaults to the empty string: i.e., - summaries are expected to be written to the session logdir. - display_name: An optional human-readable display name. - description: An optional Markdown string with a human-readable - description of this metric, to appear in TensorBoard. - dataset_type: Either `Metric.TRAINING` or `Metric.VALIDATION`, or - `None`. - """ + + Args: + tag: The tag name of the scalar summary that corresponds to this + metric (as a `str`). + group: An optional string listing the subdirectory under the + session's log directory containing summaries for this metric. + For instance, if summaries for training runs are written to + events files in `ROOT_LOGDIR/SESSION_ID/train`, then `group` + should be `"train"`. Defaults to the empty string: i.e., + summaries are expected to be written to the session logdir. + display_name: An optional human-readable display name. + description: An optional Markdown string with a human-readable + description of this metric, to appear in TensorBoard. + dataset_type: Either `Metric.TRAINING` or `Metric.VALIDATION`, or + `None`. + """ self._tag = tag self._group = group self._display_name = display_name diff --git a/tensorboard/plugins/hparams/summary_v2_test.py b/tensorboard/plugins/hparams/summary_v2_test.py index a0e10a9a11..0eafde2611 100644 --- a/tensorboard/plugins/hparams/summary_v2_test.py +++ b/tensorboard/plugins/hparams/summary_v2_test.py @@ -87,13 +87,13 @@ def setUp(self): self.expected_session_start_pb = plugin_data_pb2.SessionStartInfo() text_format.Merge( """ - hparams { key: "learning_rate" value { number_value: 0.02 } } - hparams { key: "dense_layers" value { number_value: 5 } } - hparams { key: "optimizer" value { string_value: "adam" } } - hparams { key: "who_knows_what" value { string_value: "???" 
} } - hparams { key: "magic" value { bool_value: true } } - hparams { key: "dropout" value { number_value: 0.3 } } - """, + hparams { key: "learning_rate" value { number_value: 0.02 } } + hparams { key: "dense_layers" value { number_value: 5 } } + hparams { key: "optimizer" value { string_value: "adam" } } + hparams { key: "who_knows_what" value { string_value: "???" } } + hparams { key: "magic" value { bool_value: true } } + hparams { key: "dropout" value { number_value: 0.3 } } + """, self.expected_session_start_pb, ) self.expected_session_start_pb.group_name = self.trial_id @@ -306,74 +306,74 @@ def setUp(self): self.expected_experiment_pb = api_pb2.Experiment() text_format.Merge( """ - time_created_secs: 1555624767.0 - hparam_infos { - name: "learning_rate" - type: DATA_TYPE_FLOAT64 - domain_interval { - min_value: 0.01 - max_value: 0.1 - } - } - hparam_infos { - name: "dense_layers" - type: DATA_TYPE_FLOAT64 - domain_interval { - min_value: 2 - max_value: 7 - } - } - hparam_infos { - name: "optimizer" - type: DATA_TYPE_STRING - domain_discrete { - values { - string_value: "adam" + time_created_secs: 1555624767.0 + hparam_infos { + name: "learning_rate" + type: DATA_TYPE_FLOAT64 + domain_interval { + min_value: 0.01 + max_value: 0.1 + } } - values { - string_value: "sgd" + hparam_infos { + name: "dense_layers" + type: DATA_TYPE_FLOAT64 + domain_interval { + min_value: 2 + max_value: 7 + } } - } - } - hparam_infos { - name: "who_knows_what" - } - hparam_infos { - name: "magic" - type: DATA_TYPE_BOOL - display_name: "~*~ Magic ~*~" - description: "descriptive" - domain_discrete { - values { - bool_value: false + hparam_infos { + name: "optimizer" + type: DATA_TYPE_STRING + domain_discrete { + values { + string_value: "adam" + } + values { + string_value: "sgd" + } + } } - values { - bool_value: true + hparam_infos { + name: "who_knows_what" } - } - } - metric_infos { - name { - tag: "samples_per_second" - } - } - metric_infos { - name { - group: "train" - tag: "batch_loss" - } - display_name: "loss (train)" - } - metric_infos { - name { - group: "validation" - tag: "epoch_accuracy" - } - display_name: "accuracy (val.)" - description: "Accuracy on the _validation_ dataset." - dataset_type: DATASET_VALIDATION - } - """, + hparam_infos { + name: "magic" + type: DATA_TYPE_BOOL + display_name: "~*~ Magic ~*~" + description: "descriptive" + domain_discrete { + values { + bool_value: false + } + values { + bool_value: true + } + } + } + metric_infos { + name { + tag: "samples_per_second" + } + } + metric_infos { + name { + group: "train" + tag: "batch_loss" + } + display_name: "loss (train)" + } + metric_infos { + name { + group: "validation" + tag: "epoch_accuracy" + } + display_name: "accuracy (val.)" + description: "Accuracy on the _validation_ dataset." + dataset_type: DATASET_VALIDATION + } + """, self.expected_experiment_pb, ) diff --git a/tensorboard/plugins/image/images_plugin.py b/tensorboard/plugins/image/images_plugin.py index a07b83d37e..a5b2901814 100644 --- a/tensorboard/plugins/image/images_plugin.py +++ b/tensorboard/plugins/image/images_plugin.py @@ -83,11 +83,11 @@ def is_active(self): db = self._db_connection_provider() cursor = db.execute( """ - SELECT 1 - FROM Tags - WHERE Tags.plugin_name = ? - LIMIT 1 - """, + SELECT 1 + FROM Tags + WHERE Tags.plugin_name = ? 
+ LIMIT 1 + """, (metadata.PLUGIN_NAME,), ) return bool(list(cursor)) @@ -105,25 +105,25 @@ def _index_impl(self): db = self._db_connection_provider() cursor = db.execute( """ - SELECT - Runs.run_name, - Tags.tag_name, - Tags.display_name, - Descriptions.description, - /* Subtract 2 for leading width and height elements. */ - MAX(CAST (Tensors.shape AS INT)) - 2 AS samples - FROM Tags - JOIN Runs USING (run_id) - JOIN Tensors ON Tags.tag_id = Tensors.series - LEFT JOIN Descriptions ON Tags.tag_id = Descriptions.id - WHERE Tags.plugin_name = :plugin - /* Shape should correspond to a rank-1 tensor. */ - AND NOT INSTR(Tensors.shape, ',') - /* Required to use TensorSeriesStepIndex. */ - AND Tensors.step IS NOT NULL - GROUP BY Tags.tag_id - HAVING samples >= 1 - """, + SELECT + Runs.run_name, + Tags.tag_name, + Tags.display_name, + Descriptions.description, + /* Subtract 2 for leading width and height elements. */ + MAX(CAST (Tensors.shape AS INT)) - 2 AS samples + FROM Tags + JOIN Runs USING (run_id) + JOIN Tensors ON Tags.tag_id = Tensors.series + LEFT JOIN Descriptions ON Tags.tag_id = Descriptions.id + WHERE Tags.plugin_name = :plugin + /* Shape should correspond to a rank-1 tensor. */ + AND NOT INSTR(Tensors.shape, ',') + /* Required to use TensorSeriesStepIndex. */ + AND Tensors.step IS NOT NULL + GROUP BY Tags.tag_id + HAVING samples >= 1 + """, {"plugin": metadata.PLUGIN_NAME}, ) result = collections.defaultdict(dict) @@ -209,30 +209,30 @@ def _image_response_for_run(self, run, tag, sample): db = self._db_connection_provider() cursor = db.execute( """ - SELECT - computed_time, - step, - CAST (T0.data AS INT) AS width, - CAST (T1.data AS INT) AS height - FROM Tensors - JOIN TensorStrings AS T0 - ON Tensors.rowid = T0.tensor_rowid - JOIN TensorStrings AS T1 - ON Tensors.rowid = T1.tensor_rowid - WHERE - series = ( - SELECT tag_id - FROM Runs - CROSS JOIN Tags USING (run_id) - WHERE Runs.run_name = :run AND Tags.tag_name = :tag) - AND step IS NOT NULL - AND dtype = :dtype - /* Should be n-vector, n >= 3: [width, height, samples...] */ - AND (NOT INSTR(shape, ',') AND CAST (shape AS INT) >= 3) - AND T0.idx = 0 - AND T1.idx = 1 - ORDER BY step - """, + SELECT + computed_time, + step, + CAST (T0.data AS INT) AS width, + CAST (T1.data AS INT) AS height + FROM Tensors + JOIN TensorStrings AS T0 + ON Tensors.rowid = T0.tensor_rowid + JOIN TensorStrings AS T1 + ON Tensors.rowid = T1.tensor_rowid + WHERE + series = ( + SELECT tag_id + FROM Runs + CROSS JOIN Tags USING (run_id) + WHERE Runs.run_name = :run AND Tags.tag_name = :tag) + AND step IS NOT NULL + AND dtype = :dtype + /* Should be n-vector, n >= 3: [width, height, samples...] */ + AND (NOT INSTR(shape, ',') AND CAST (shape AS INT) >= 3) + AND T0.idx = 0 + AND T1.idx = 1 + ORDER BY step + """, {"run": run, "tag": tag, "dtype": tf.string.as_datatype_enum}, ) return [ @@ -320,30 +320,30 @@ def _get_individual_image(self, run, tag, index, sample): db = self._db_connection_provider() cursor = db.execute( """ - SELECT data - FROM TensorStrings - WHERE - /* Skip first 2 elements which are width and height. */ - idx = 2 + :sample - AND tensor_rowid = ( - SELECT rowid - FROM Tensors - WHERE - series = ( - SELECT tag_id - FROM Runs - CROSS JOIN Tags USING (run_id) - WHERE - Runs.run_name = :run - AND Tags.tag_name = :tag) - AND step IS NOT NULL - AND dtype = :dtype - /* Should be n-vector, n >= 3: [width, height, samples...] 
*/ - AND (NOT INSTR(shape, ',') AND CAST (shape AS INT) >= 3) - ORDER BY step - LIMIT 1 - OFFSET :index) - """, + SELECT data + FROM TensorStrings + WHERE + /* Skip first 2 elements which are width and height. */ + idx = 2 + :sample + AND tensor_rowid = ( + SELECT rowid + FROM Tensors + WHERE + series = ( + SELECT tag_id + FROM Runs + CROSS JOIN Tags USING (run_id) + WHERE + Runs.run_name = :run + AND Tags.tag_name = :tag) + AND step IS NOT NULL + AND dtype = :dtype + /* Should be n-vector, n >= 3: [width, height, samples...] */ + AND (NOT INSTR(shape, ',') AND CAST (shape AS INT) >= 3) + ORDER BY step + LIMIT 1 + OFFSET :index) + """, { "run": run, "tag": tag, diff --git a/tensorboard/plugins/interactive_inference/witwidget/notebook/base.py b/tensorboard/plugins/interactive_inference/witwidget/notebook/base.py index 98a9ee0454..1f22f6ea72 100644 --- a/tensorboard/plugins/interactive_inference/witwidget/notebook/base.py +++ b/tensorboard/plugins/interactive_inference/witwidget/notebook/base.py @@ -147,7 +147,8 @@ def wrapped_compare_custom_predict_fn(examples): def _get_element_html(self): return """ - """ + + """ def set_examples(self, examples): """Sets the examples shown in WIT. diff --git a/tensorboard/plugins/interactive_inference/witwidget/notebook/colab/wit.py b/tensorboard/plugins/interactive_inference/witwidget/notebook/colab/wit.py index 8a1e0c6243..18657d8c31 100644 --- a/tensorboard/plugins/interactive_inference/witwidget/notebook/colab/wit.py +++ b/tensorboard/plugins/interactive_inference/witwidget/notebook/colab/wit.py @@ -279,7 +279,8 @@ def __init__(self, config_builder, height=1000): def _get_element_html(self): return """ - """ + -1 - ORDER BY Tensors.step - """ + SELECT + Runs.run_name, + Tensors.step, + Tensors.computed_time, + Tensors.data, + Tensors.dtype, + Tensors.shape, + Tags.plugin_data + FROM Tensors + JOIN Tags + ON Tensors.series = Tags.tag_id + JOIN Runs + ON Tags.run_id = Runs.run_id + WHERE + Runs.run_name IN (%s) + AND Tags.tag_name = ? + AND Tags.plugin_name = ? + AND Tensors.step > -1 + ORDER BY Tensors.step + """ % ",".join(["?"] * len(runs)), runs + [tag, metadata.PLUGIN_NAME], ) @@ -212,16 +212,16 @@ def tags_impl(self): db = self._db_connection_provider() cursor = db.execute( """ - SELECT - Tags.tag_name, - Tags.display_name, - Runs.run_name - FROM Tags - JOIN Runs - ON Tags.run_id = Runs.run_id - WHERE - Tags.plugin_name = ? - """, + SELECT + Tags.tag_name, + Tags.display_name, + Runs.run_name + FROM Tags + JOIN Runs + ON Tags.run_id = Runs.run_id + WHERE + Tags.plugin_name = ? + """, (metadata.PLUGIN_NAME,), ) result = {} @@ -282,26 +282,26 @@ def available_time_entries_impl(self): # For each run, pick a tag. cursor = db.execute( """ - SELECT - TagPickingTable.run_name, - Tensors.step, - Tensors.computed_time - FROM (/* For each run, pick any tag. */ - SELECT - Runs.run_id AS run_id, - Runs.run_name AS run_name, - Tags.tag_id AS tag_id - FROM Runs - JOIN Tags - ON Tags.run_id = Runs.run_id - WHERE - Tags.plugin_name = ? - GROUP BY Runs.run_id) AS TagPickingTable - JOIN Tensors - ON Tensors.series = TagPickingTable.tag_id - WHERE Tensors.step IS NOT NULL - ORDER BY Tensors.step - """, + SELECT + TagPickingTable.run_name, + Tensors.step, + Tensors.computed_time + FROM (/* For each run, pick any tag. */ + SELECT + Runs.run_id AS run_id, + Runs.run_name AS run_name, + Tags.tag_id AS tag_id + FROM Runs + JOIN Tags + ON Tags.run_id = Runs.run_id + WHERE + Tags.plugin_name = ? 
@@ -282,26 +282,26 @@ def available_time_entries_impl(self):
         # For each run, pick a tag.
         cursor = db.execute(
             """
-        SELECT
-          TagPickingTable.run_name,
-          Tensors.step,
-          Tensors.computed_time
-        FROM (/* For each run, pick any tag. */
-          SELECT
-            Runs.run_id AS run_id,
-            Runs.run_name AS run_name,
-            Tags.tag_id AS tag_id
-          FROM Runs
-          JOIN Tags
-            ON Tags.run_id = Runs.run_id
-          WHERE
-            Tags.plugin_name = ?
-          GROUP BY Runs.run_id) AS TagPickingTable
-        JOIN Tensors
-          ON Tensors.series = TagPickingTable.tag_id
-        WHERE Tensors.step IS NOT NULL
-        ORDER BY Tensors.step
-        """,
+            SELECT
+              TagPickingTable.run_name,
+              Tensors.step,
+              Tensors.computed_time
+            FROM (/* For each run, pick any tag. */
+              SELECT
+                Runs.run_id AS run_id,
+                Runs.run_name AS run_name,
+                Tags.tag_id AS tag_id
+              FROM Runs
+              JOIN Tags
+                ON Tags.run_id = Runs.run_id
+              WHERE
+                Tags.plugin_name = ?
+              GROUP BY Runs.run_id) AS TagPickingTable
+            JOIN Tensors
+              ON Tensors.series = TagPickingTable.tag_id
+            WHERE Tensors.step IS NOT NULL
+            ORDER BY Tensors.step
+            """,
             (metadata.PLUGIN_NAME,),
         )
         for (run, step, wall_time) in cursor:
@@ -372,11 +372,11 @@ def is_active(self):
         db = self._db_connection_provider()
         cursor = db.execute(
             """
-        SELECT 1
-        FROM Tags
-        WHERE Tags.plugin_name = ?
-        LIMIT 1
-        """,
+            SELECT 1
+            FROM Tags
+            WHERE Tags.plugin_name = ?
+            LIMIT 1
+            """,
             (metadata.PLUGIN_NAME,),
         )
         return bool(list(cursor))
diff --git a/tensorboard/plugins/profile/trace_events_json_test.py b/tensorboard/plugins/profile/trace_events_json_test.py
index 1157a5c3c7..7839eb897f 100644
--- a/tensorboard/plugins/profile/trace_events_json_test.py
+++ b/tensorboard/plugins/profile/trace_events_json_test.py
@@ -39,39 +39,39 @@ def testJsonConversion(self):
         self.assertEqual(
             self.convert(
                 """
-        devices { key: 2 value {
-          name: 'D2'
-          device_id: 2
-          resources { key: 2 value {
-            resource_id: 2
-            name: 'R2.2'
-          } }
-        } }
-        devices { key: 1 value {
-          name: 'D1'
-          device_id: 1
-          resources { key: 2 value {
-            resource_id: 1
-            name: 'R1.2'
-          } }
-        } }
+                devices { key: 2 value {
+                  name: 'D2'
+                  device_id: 2
+                  resources { key: 2 value {
+                    resource_id: 2
+                    name: 'R2.2'
+                  } }
+                } }
+                devices { key: 1 value {
+                  name: 'D1'
+                  device_id: 1
+                  resources { key: 2 value {
+                    resource_id: 1
+                    name: 'R1.2'
+                  } }
+                } }
 
-        trace_events {
-          device_id: 1
-          resource_id: 2
-          name: "E1.2.1"
-          timestamp_ps: 100000
-          duration_ps: 10000
-          args { key: "label" value: "E1.2.1" }
-          args { key: "extra" value: "extra info" }
-        }
-        trace_events {
-          device_id: 2
-          resource_id: 2
-          name: "E2.2.1"
-          timestamp_ps: 105000
-        }
-        """
+                trace_events {
+                  device_id: 1
+                  resource_id: 2
+                  name: "E1.2.1"
+                  timestamp_ps: 100000
+                  duration_ps: 10000
+                  args { key: "label" value: "E1.2.1" }
+                  args { key: "extra" value: "extra info" }
+                }
+                trace_events {
+                  device_id: 2
+                  resource_id: 2
+                  name: "E2.2.1"
+                  timestamp_ps: 105000
+                }
+                """
             ),
             dict(
                 displayTimeUnit="ns",
diff --git a/tensorboard/plugins/scalar/scalars_plugin.py b/tensorboard/plugins/scalar/scalars_plugin.py
index 4004abbaed..2564ab4273 100644
--- a/tensorboard/plugins/scalar/scalars_plugin.py
+++ b/tensorboard/plugins/scalar/scalars_plugin.py
@@ -84,12 +84,12 @@ def is_active(self):
         db = self._db_connection_provider()
         cursor = db.execute(
             """
-        SELECT
-          1
-        FROM Tags
-        WHERE Tags.plugin_name = ?
-        LIMIT 1
-        """,
+            SELECT
+              1
+            FROM Tags
+            WHERE Tags.plugin_name = ?
+            LIMIT 1
+            """,
             (metadata.PLUGIN_NAME,),
         )
         return bool(list(cursor))
@@ -128,16 +128,16 @@ def index_impl(self, experiment=None):
         db = self._db_connection_provider()
         cursor = db.execute(
             """
-        SELECT
-          Tags.tag_name,
-          Tags.display_name,
-          Runs.run_name
-        FROM Tags
-        JOIN Runs
-          ON Tags.run_id = Runs.run_id
-        WHERE
-          Tags.plugin_name = ?
-        """,
+            SELECT
+              Tags.tag_name,
+              Tags.display_name,
+              Runs.run_name
+            FROM Tags
+            JOIN Runs
+              ON Tags.run_id = Runs.run_id
+            WHERE
+              Tags.plugin_name = ?
+            """,
             (metadata.PLUGIN_NAME,),
        )
        result = collections.defaultdict(dict)
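
Both tag-index queries return flat (tag_name, display_name, run_name) rows, which the handlers then regroup into a per-run mapping via collections.defaultdict(dict), as the trailing context shows. A small sketch of that reshaping; the row values and the displayName field name are illustrative assumptions, not copied from the plugin:

    import collections

    # Flat rows as the index query would yield them:
    # (tag_name, display_name, run_name).
    rows = [
        ("loss", "Loss", "train"),
        ("loss", "Loss", "eval"),
        ("accuracy", "Accuracy", "train"),
    ]

    result = collections.defaultdict(dict)
    for tag_name, display_name, run_name in rows:
        # Group by run first, so handlers can answer
        # "which tags does run X have?" with one lookup.
        result[run_name][tag_name] = {"displayName": display_name}

    print(result["train"])  # {'loss': {...}, 'accuracy': {...}}
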
@@ -193,27 +193,27 @@ def scalars_impl(self, tag, run, experiment, output_format):
         # placeholder rows en masse. The check for step filters out those rows.
         cursor = db.execute(
             """
-        SELECT
-          Tensors.step,
-          Tensors.computed_time,
-          Tensors.data,
-          Tensors.dtype
-        FROM Tensors
-        JOIN Tags
-          ON Tensors.series = Tags.tag_id
-        JOIN Runs
-          ON Tags.run_id = Runs.run_id
-        WHERE
-          /* For backwards compatibility, ignore the experiment id
-             for matching purposes if it is empty. */
-          (:exp == '' OR Runs.experiment_id == CAST(:exp AS INT))
-          AND Runs.run_name = :run
-          AND Tags.tag_name = :tag
-          AND Tags.plugin_name = :plugin
-          AND Tensors.shape = ''
-          AND Tensors.step > -1
-        ORDER BY Tensors.step
-        """,
+            SELECT
+              Tensors.step,
+              Tensors.computed_time,
+              Tensors.data,
+              Tensors.dtype
+            FROM Tensors
+            JOIN Tags
+              ON Tensors.series = Tags.tag_id
+            JOIN Runs
+              ON Tags.run_id = Runs.run_id
+            WHERE
+              /* For backwards compatibility, ignore the experiment id
+                 for matching purposes if it is empty. */
+              (:exp == '' OR Runs.experiment_id == CAST(:exp AS INT))
+              AND Runs.run_name = :run
+              AND Tags.tag_name = :tag
+              AND Tags.plugin_name = :plugin
+              AND Tensors.shape = ''
+              AND Tensors.step > -1
+            ORDER BY Tensors.step
+            """,
             dict(
                 exp=experiment,
                 run=run,
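
The (:exp == '' OR Runs.experiment_id == CAST(:exp AS INT)) predicate makes the experiment filter optional for backwards compatibility: an empty id matches every run, a real id must match exactly. A compact demonstration against a toy table (not the plugin's real schema):

    import sqlite3

    db = sqlite3.connect(":memory:")
    db.execute("CREATE TABLE Runs (experiment_id INT, run_name TEXT)")
    db.executemany(
        "INSERT INTO Runs VALUES (?, ?)", [(1, "train"), (2, "eval")]
    )

    query = """
        SELECT run_name FROM Runs
        WHERE (:exp == '' OR Runs.experiment_id == CAST(:exp AS INT))
    """
    # An empty string disables the filter; a real id narrows it.
    print(db.execute(query, {"exp": ""}).fetchall())   # [('train',), ('eval',)]
    print(db.execute(query, {"exp": "2"}).fetchall())  # [('eval',)]
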
diff --git a/tensorboard/plugins/text/text_plugin_test.py b/tensorboard/plugins/text/text_plugin_test.py
index 52c744ca71..5f77dbb1b2 100644
--- a/tensorboard/plugins/text/text_plugin_test.py
+++ b/tensorboard/plugins/text/text_plugin_test.py
@@ -110,22 +110,23 @@ def testText(self):
             table,
             textwrap.dedent(
                 """\
-        <table>
-        <tbody>
-        <tr>
-        <td><p>one</p></td>
-        </tr>
-        <tr>
-        <td><p>two</p></td>
-        </tr>
-        <tr>
-        <td><p>three</p></td>
-        </tr>
-        <tr>
-        <td><p>four</p></td>
-        </tr>
-        </tbody>
-        </table>"""
+                <table>
+                <tbody>
+                <tr>
+                <td><p>one</p></td>
+                </tr>
+                <tr>
+                <td><p>two</p></td>
+                </tr>
+                <tr>
+                <td><p>three</p></td>
+                </tr>
+                <tr>
+                <td><p>four</p></td>
+                </tr>
+                </tbody>
+                </table>
+                """.rstrip()
             ),
         )
 
@@ -133,41 +134,43 @@ def testTableGeneration(self):
         array2d = np.array([["one", "two"], ["three", "four"]])
         expected_table = textwrap.dedent(
             """\
-        <table>
-        <tbody>
-        <tr>
-        <td>one</td>
-        <td>two</td>
-        </tr>
-        <tr>
-        <td>three</td>
-        <td>four</td>
-        </tr>
-        </tbody>
-        </table>"""
+            <table>
+            <tbody>
+            <tr>
+            <td>one</td>
+            <td>two</td>
+            </tr>
+            <tr>
+            <td>three</td>
+            <td>four</td>
+            </tr>
+            </tbody>
+            </table>
+            """.rstrip()
         )
         self.assertEqual(text_plugin.make_table(array2d), expected_table)
 
         expected_table_with_headers = textwrap.dedent(
             """\
-        <table>
-        <thead>
-        <tr>
-        <th>c1</th>
-        <th>c2</th>
-        </tr>
-        </thead>
-        <tbody>
-        <tr>
-        <td>one</td>
-        <td>two</td>
-        </tr>
-        <tr>
-        <td>three</td>
-        <td>four</td>
-        </tr>
-        </tbody>
-        </table>"""
+            <table>
+            <thead>
+            <tr>
+            <th>c1</th>
+            <th>c2</th>
+            </tr>
+            </thead>
+            <tbody>
+            <tr>
+            <td>one</td>
+            <td>two</td>
+            </tr>
+            <tr>
+            <td>three</td>
+            <td>four</td>
+            </tr>
+            </tbody>
+            </table>
+            """.rstrip()
         )
 
         actual_with_headers = text_plugin.make_table(
@@ -178,54 +181,56 @@ def testTableGeneration(self):
         array_1d = np.array(["one", "two", "three", "four", "five"])
         expected_1d = textwrap.dedent(
             """\
-        <table>
-        <tbody>
-        <tr>
-        <td>one</td>
-        </tr>
-        <tr>
-        <td>two</td>
-        </tr>
-        <tr>
-        <td>three</td>
-        </tr>
-        <tr>
-        <td>four</td>
-        </tr>
-        <tr>
-        <td>five</td>
-        </tr>
-        </tbody>
-        </table>"""
+            <table>
+            <tbody>
+            <tr>
+            <td>one</td>
+            </tr>
+            <tr>
+            <td>two</td>
+            </tr>
+            <tr>
+            <td>three</td>
+            </tr>
+            <tr>
+            <td>four</td>
+            </tr>
+            <tr>
+            <td>five</td>
+            </tr>
+            </tbody>
+            </table>
+            """.rstrip()
        )
        self.assertEqual(text_plugin.make_table(array_1d), expected_1d)
 
        expected_1d_with_headers = textwrap.dedent(
            """\
-        <table>
-        <thead>
-        <tr>
-        <th>X</th>
-        </tr>
-        </thead>
-        <tbody>
-        <tr>
-        <td>one</td>
-        </tr>
-        <tr>
-        <td>two</td>
-        </tr>
-        <tr>
-        <td>three</td>
-        </tr>
-        <tr>
-        <td>four</td>
-        </tr>
-        <tr>
-        <td>five</td>
-        </tr>
-        </tbody>
-        </table>"""
+            <table>
+            <thead>
+            <tr>
+            <th>X</th>
+            </tr>
+            </thead>
+            <tbody>
+            <tr>
+            <td>one</td>
+            </tr>
+            <tr>
+            <td>two</td>
+            </tr>
+            <tr>
+            <td>three</td>
+            </tr>
+            <tr>
+            <td>four</td>
+            </tr>
+            <tr>
+            <td>five</td>
+            </tr>
+            </tbody>
+            </table>
+            """.rstrip()
        )
        actual_1d_with_headers = text_plugin.make_table(array_1d, headers=["X"])
        self.assertEqual(actual_1d_with_headers, expected_1d_with_headers)
@@ -301,34 +306,36 @@ def test_text_array_to_html(self):
         vector = np.array(["foo", "bar"])
         vector_expected = textwrap.dedent(
             """\
-        <table>
-        <tbody>
-        <tr>
-        <td><p>foo</p></td>
-        </tr>
-        <tr>
-        <td><p>bar</p></td>
-        </tr>
-        </tbody>
-        </table>"""
+            <table>
+            <tbody>
+            <tr>
+            <td><p>foo</p></td>
+            </tr>
+            <tr>
+            <td><p>bar</p></td>
+            </tr>
+            </tbody>
+            </table>
+            """.rstrip()
         )
         self.assertEqual(convert(vector), vector_expected)
 
         d2 = np.array([["foo", "bar"], ["zoink", "zod"]])
         d2_expected = textwrap.dedent(
             """\
-        <table>
-        <tbody>
-        <tr>
-        <td><p>foo</p></td>
-        <td><p>bar</p></td>
-        </tr>
-        <tr>
-        <td><p>zoink</p></td>
-        <td><p>zod</p></td>
-        </tr>
-        </tbody>
-        </table>"""
+            <table>
+            <tbody>
+            <tr>
+            <td><p>foo</p></td>
+            <td><p>bar</p></td>
+            </tr>
+            <tr>
+            <td><p>zoink</p></td>
+            <td><p>zod</p></td>
+            </tr>
+            </tbody>
+            </table>
+            """.rstrip()
         )
         self.assertEqual(convert(d2), d2_expected)
 
@@ -344,18 +351,19 @@ def test_text_array_to_html(self):
         )
         d3_expected = warning + textwrap.dedent(
             """\
-        <table>
-        <tbody>
-        <tr>
-        <td><p>foo</p></td>
-        <td><p>bar</p></td>
-        </tr>
-        <tr>
-        <td><p>zoink</p></td>
-        <td><p>zod</p></td>
-        </tr>
-        </tbody>
-        </table>"""
+            <table>
+            <tbody>
+            <tr>
+            <td><p>foo</p></td>
+            <td><p>bar</p></td>
+            </tr>
+            <tr>
+            <td><p>zoink</p></td>
+            <td><p>zod</p></td>
+            </tr>
+            </tbody>
+            </table>
+ """.rstrip() ) self.assertEqual(convert(d3), d3_expected) diff --git a/tensorboard/scripts/generate_testdata.py b/tensorboard/scripts/generate_testdata.py index a10a4b6be8..d6f5d3f592 100644 --- a/tensorboard/scripts/generate_testdata.py +++ b/tensorboard/scripts/generate_testdata.py @@ -33,17 +33,13 @@ flags.DEFINE_string( - "target", - None, - """The directory where serialized data -will be written""", + "target", None, """The directory where serialized data will be written""", ) flags.DEFINE_boolean( "overwrite", False, - """Whether to remove and overwrite -TARGET if it already exists.""", + """Whether to remove and overwrite TARGET if it already exists.""", ) FLAGS = flags.FLAGS diff --git a/tensorboard/tools/diagnose_tensorboard.py b/tensorboard/tools/diagnose_tensorboard.py index 66d35fe98c..d515ab103e 100644 --- a/tensorboard/tools/diagnose_tensorboard.py +++ b/tensorboard/tools/diagnose_tensorboard.py @@ -218,14 +218,14 @@ def installed_packages(): if found_conflict: preamble = reflow( """ - Conflicting package installations found. Depending on the order - of installations and uninstallations, behavior may be undefined. - Please uninstall ALL versions of TensorFlow and TensorBoard, - then reinstall ONLY the desired version of TensorFlow, which - will transitively pull in the proper version of TensorBoard. (If - you use TensorBoard without TensorFlow, just reinstall the - appropriate version of TensorBoard directly.) - """ + Conflicting package installations found. Depending on the order + of installations and uninstallations, behavior may be undefined. + Please uninstall ALL versions of TensorFlow and TensorBoard, + then reinstall ONLY the desired version of TensorFlow, which + will transitively pull in the proper version of TensorBoard. (If + you use TensorBoard without TensorFlow, just reinstall the + appropriate version of TensorBoard directly.) + """ ) packages_to_uninstall = sorted( frozenset().union(*expect_unique) & packages_set @@ -301,24 +301,24 @@ def readable_fqdn(): if is_non_ascii: message = reflow( """ - Your computer's hostname, %r, contains bytes outside of the - printable ASCII range. Some versions of Python have trouble - working with such names (https://bugs.python.org/issue26227). - Consider changing to a hostname that only contains printable - ASCII bytes. - """ + Your computer's hostname, %r, contains bytes outside of the + printable ASCII range. Some versions of Python have trouble + working with such names (https://bugs.python.org/issue26227). + Consider changing to a hostname that only contains printable + ASCII bytes. + """ % (binary_hostname,) ) yield Suggestion("Use an ASCII hostname", message) else: message = reflow( """ - Python can't read your computer's hostname, %r. This can occur - if the hostname contains non-ASCII bytes - (https://bugs.python.org/issue26227). Consider changing your - hostname, rebooting your machine, and rerunning this diagnosis - script to see if the problem is resolved. - """ + Python can't read your computer's hostname, %r. This can occur + if the hostname contains non-ASCII bytes + (https://bugs.python.org/issue26227). Consider changing your + hostname, rebooting your machine, and rerunning this diagnosis + script to see if the problem is resolved. 
+ """ % (binary_hostname,) ) yield Suggestion("Use a simpler hostname", message) @@ -345,11 +345,11 @@ def stat_tensorboardinfo(): if stat_result.st_mode & 0o777 != 0o777: preamble = reflow( """ - The ".tensorboard-info" directory was created by an old version - of TensorBoard, and its permissions are not set correctly; see - issue #2010. Change that directory to be world-accessible (may - require superuser privilege): - """ + The ".tensorboard-info" directory was created by an old version + of TensorBoard, and its permissions are not set correctly; see + issue #2010. Change that directory to be world-accessible (may + require superuser privilege): + """ ) # This error should only appear on Unices, so it's okay to use # Unix-specific utilities and shell syntax. @@ -393,22 +393,22 @@ def is_bad(root): if bad_roots == [""]: message = reflow( """ - Your current directory contains a `tensorboard` Python package - that does not include generated files. This can happen if your - current directory includes the TensorBoard source tree (e.g., - you are in the TensorBoard Git repository). Consider changing - to a different directory. - """ + Your current directory contains a `tensorboard` Python package + that does not include generated files. This can happen if your + current directory includes the TensorBoard source tree (e.g., + you are in the TensorBoard Git repository). Consider changing + to a different directory. + """ ) else: preamble = reflow( """ - Your Python path contains a `tensorboard` package that does - not include generated files. This can happen if your current - directory includes the TensorBoard source tree (e.g., you are - in the TensorBoard Git repository). The following directories - from your Python path may be problematic: - """ + Your Python path contains a `tensorboard` package that does + not include generated files. This can happen if your current + directory includes the TensorBoard source tree (e.g., you are + in the TensorBoard Git repository). The following directories + from your Python path may be problematic: + """ ) roots = [] realpaths_seen = set() @@ -488,22 +488,22 @@ def main(): print( reflow( """ - Please try each suggestion enumerated above to determine whether - it solves your problem. If none of these suggestions works, - please copy ALL of the above output, including the lines - containing only backticks, into your GitHub issue or comment. Be - sure to redact any sensitive information. - """ + Please try each suggestion enumerated above to determine whether + it solves your problem. If none of these suggestions works, + please copy ALL of the above output, including the lines + containing only backticks, into your GitHub issue or comment. Be + sure to redact any sensitive information. + """ ) ) else: print( reflow( """ - No action items identified. Please copy ALL of the above output, - including the lines containing only backticks, into your GitHub - issue or comment. Be sure to redact any sensitive information. - """ + No action items identified. Please copy ALL of the above output, + including the lines containing only backticks, into your GitHub + issue or comment. Be sure to redact any sensitive information. 
+ """ ) ) diff --git a/tensorboard/uploader/uploader_main.py b/tensorboard/uploader/uploader_main.py index d9679f7cc9..5ed906ea85 100644 --- a/tensorboard/uploader/uploader_main.py +++ b/tensorboard/uploader/uploader_main.py @@ -329,15 +329,15 @@ class _DeleteExperimentIntent(_Intent): _MESSAGE_TEMPLATE = textwrap.dedent( u"""\ - This will delete the experiment on https://tensorboard.dev with the - following experiment ID: + This will delete the experiment on https://tensorboard.dev with the + following experiment ID: - {experiment_id} + {experiment_id} - You have chosen to delete an experiment. All experiments uploaded - to TensorBoard.dev are publicly visible. Do not upload sensitive - data. - """ + You have chosen to delete an experiment. All experiments uploaded + to TensorBoard.dev are publicly visible. Do not upload sensitive + data. + """ ) def __init__(self, experiment_id): @@ -377,10 +377,10 @@ class _ListIntent(_Intent): _MESSAGE = textwrap.dedent( u"""\ - This will list all experiments that you've uploaded to - https://tensorboard.dev. TensorBoard.dev experiments are visible - to everyone. Do not upload sensitive data. - """ + This will list all experiments that you've uploaded to + https://tensorboard.dev. TensorBoard.dev experiments are visible + to everyone. Do not upload sensitive data. + """ ) def get_ack_message_body(self): @@ -433,14 +433,14 @@ class _UploadIntent(_Intent): _MESSAGE_TEMPLATE = textwrap.dedent( u"""\ - This will upload your TensorBoard logs to https://tensorboard.dev/ from - the following directory: + This will upload your TensorBoard logs to https://tensorboard.dev/ from + the following directory: - {logdir} + {logdir} - This TensorBoard will be visible to everyone. Do not upload sensitive - data. - """ + This TensorBoard will be visible to everyone. Do not upload sensitive + data. + """ ) def __init__(self, logdir): @@ -480,15 +480,15 @@ class _ExportIntent(_Intent): _MESSAGE_TEMPLATE = textwrap.dedent( u"""\ - This will download all your experiment data from https://tensorboard.dev - and save it to the following directory: + This will download all your experiment data from https://tensorboard.dev + and save it to the following directory: - {output_dir} + {output_dir} - Downloading your experiment data does not delete it from the - service. All experiments uploaded to TensorBoard.dev are publicly - visible. Do not upload sensitive data. - """ + Downloading your experiment data does not delete it from the + service. All experiments uploaded to TensorBoard.dev are publicly + visible. Do not upload sensitive data. + """ ) def __init__(self, output_dir):