diff --git a/tensorboard/backend/application.py b/tensorboard/backend/application.py
index 6ccde8d3f8..c300eea9dd 100644
--- a/tensorboard/backend/application.py
+++ b/tensorboard/backend/application.py
@@ -431,10 +431,10 @@ def _serve_plugin_entry(self, request):
         html = textwrap.dedent(
             """
-        Here is some data:
-
-        | TensorBoard usage | Happiness |
-        |---|---|
-        | 0.0 | 0.0 |
-        | 0.5 | 0.5 |
-        | 1.0 | 1.0 |
-
-        Wouldn't you agree?
""" +Here is some data:
+| TensorBoard usage | +Happiness | +
|---|---|
| 0.0 | +0.0 | +
| 0.5 | +0.5 | +
| 1.0 | +1.0 | +
Wouldn't you agree?
+ """.rstrip() ), ) diff --git a/tensorboard/plugins/beholder/im_util.py b/tensorboard/plugins/beholder/im_util.py index db3e6e9ef6..06c1039a5b 100644 --- a/tensorboard/plugins/beholder/im_util.py +++ b/tensorboard/plugins/beholder/im_util.py @@ -35,9 +35,9 @@ def global_extrema(arrays): def scale_sections(sections, scaling_scope): """ - input: unscaled sections. - returns: sections scaled to [0, 255] - """ + input: unscaled sections. + returns: sections scaled to [0, 255] + """ new_sections = [] if scaling_scope == "layer": diff --git a/tensorboard/plugins/beholder/visualizer.py b/tensorboard/plugins/beholder/visualizer.py index 46fbca7a19..ff64043445 100644 --- a/tensorboard/plugins/beholder/visualizer.py +++ b/tensorboard/plugins/beholder/visualizer.py @@ -183,10 +183,10 @@ def _determine_section_height(self, array, show_all): def _arrays_to_sections(self, arrays): """ - input: unprocessed numpy arrays. - returns: columns of the size that they will appear in the image, not scaled - for display. That needs to wait until after variance is computed. - """ + input: unprocessed numpy arrays. + returns: columns of the size that they will appear in the image, not scaled + for display. That needs to wait until after variance is computed. + """ sections = [] sections_to_resize_later = {} show_all = self.config["show_all"] diff --git a/tensorboard/plugins/core/core_plugin.py b/tensorboard/plugins/core/core_plugin.py index 4dfe678f1e..78c5fb7836 100644 --- a/tensorboard/plugins/core/core_plugin.py +++ b/tensorboard/plugins/core/core_plugin.py @@ -191,13 +191,13 @@ def _serve_runs(self, request): db = self._db_connection_provider() cursor = db.execute( """ - SELECT - run_name, - started_time IS NULL as started_time_nulls_last, - started_time - FROM Runs - ORDER BY started_time_nulls_last, started_time, run_name - """ + SELECT + run_name, + started_time IS NULL as started_time_nulls_last, + started_time + FROM Runs + ORDER BY started_time_nulls_last, started_time, run_name + """ ) run_names = [row[0] for row in cursor] else: @@ -237,15 +237,15 @@ def list_experiments_impl(self): db = self._db_connection_provider() cursor = db.execute( """ - SELECT - experiment_id, - experiment_name, - started_time, - started_time IS NULL as started_time_nulls_last - FROM Experiments - ORDER BY started_time_nulls_last, started_time, experiment_name, - experiment_id - """ + SELECT + experiment_id, + experiment_name, + started_time, + started_time IS NULL as started_time_nulls_last + FROM Experiments + ORDER BY started_time_nulls_last, started_time, experiment_name, + experiment_id + """ ) results = [ {"id": row[0], "name": row[1], "startTime": row[2],} @@ -272,28 +272,28 @@ def _serve_experiment_runs(self, request): db = self._db_connection_provider() cursor = db.execute( """ - SELECT - Runs.run_id, - Runs.run_name, - Runs.started_time, - Runs.started_time IS NULL as started_time_nulls_last, - Tags.tag_id, - Tags.tag_name, - Tags.display_name, - Tags.plugin_name, - Tags.inserted_time - From Runs - LEFT JOIN Tags ON Runs.run_id = Tags.run_id - WHERE Runs.experiment_id = ? 
-        AND (Tags.tag_id IS NULL OR Tags.plugin_name IS NOT NULL)
-        ORDER BY started_time_nulls_last,
-          Runs.started_time,
-          Runs.run_name,
-          Runs.run_id,
-          Tags.tag_name,
-          Tags.display_name,
-          Tags.inserted_time;
-        """,
+                SELECT
+                  Runs.run_id,
+                  Runs.run_name,
+                  Runs.started_time,
+                  Runs.started_time IS NULL as started_time_nulls_last,
+                  Tags.tag_id,
+                  Tags.tag_name,
+                  Tags.display_name,
+                  Tags.plugin_name,
+                  Tags.inserted_time
+                From Runs
+                LEFT JOIN Tags ON Runs.run_id = Tags.run_id
+                WHERE Runs.experiment_id = ?
+                AND (Tags.tag_id IS NULL OR Tags.plugin_name IS NOT NULL)
+                ORDER BY started_time_nulls_last,
+                  Runs.started_time,
+                  Runs.run_name,
+                  Runs.run_id,
+                  Tags.tag_name,
+                  Tags.display_name,
+                  Tags.inserted_time;
+                """,
                 (exp_id,),
             )
             for row in cursor:
diff --git a/tensorboard/plugins/custom_scalar/custom_scalars_plugin.py b/tensorboard/plugins/custom_scalar/custom_scalars_plugin.py
index 9212482583..e1c6808d87 100644
--- a/tensorboard/plugins/custom_scalar/custom_scalars_plugin.py
+++ b/tensorboard/plugins/custom_scalar/custom_scalars_plugin.py
@@ -264,18 +264,18 @@ def scalars_impl(self, run, tag_regex_string):

     @wrappers.Request.application
     def layout_route(self, request):
-        r"""Fetches the custom layout specified by the config file in the logdir.
+        """Fetches the custom layout specified by the config file in the logdir.

-    If more than 1 run contains a layout, this method merges the layouts by
-    merging charts within individual categories. If 2 categories with the same
-    name are found, the charts within are merged. The merging is based on the
-    order of the runs to which the layouts are written.
+        If more than 1 run contains a layout, this method merges the layouts by
+        merging charts within individual categories. If 2 categories with the same
+        name are found, the charts within are merged. The merging is based on the
+        order of the runs to which the layouts are written.

-    The response is a JSON object mirroring properties of the Layout proto if a
-    layout for any run is found.
+        The response is a JSON object mirroring properties of the Layout proto if a
+        layout for any run is found.

-    The response is an empty object if no layout could be found.
-    """
+        The response is an empty object if no layout could be found.
+ """ body = self.layout_impl() return http_util.Respond(request, body, "application/json") diff --git a/tensorboard/plugins/graph/graph_util_test.py b/tensorboard/plugins/graph/graph_util_test.py index c456727b63..552222a133 100644 --- a/tensorboard/plugins/graph/graph_util_test.py +++ b/tensorboard/plugins/graph/graph_util_test.py @@ -23,84 +23,84 @@ class GraphUtilTest(tf.test.TestCase): def test_combine_graph_defs(self): expected_proto = """ - node { - name: "X" - op: "Input" - } - node { - name: "W" - op: "Input" - } - node { - name: "Y" - op: "MatMul" - input: "X" - input: "W" - } - node { - name: "A" - op: "Input" - } - node { - name: "B" - op: "Input" - } - node { - name: "C" - op: "MatMul" - input: "A" - input: "B" - } - versions { - producer: 21 - } - """ + node { + name: "X" + op: "Input" + } + node { + name: "W" + op: "Input" + } + node { + name: "Y" + op: "MatMul" + input: "X" + input: "W" + } + node { + name: "A" + op: "Input" + } + node { + name: "B" + op: "Input" + } + node { + name: "C" + op: "MatMul" + input: "A" + input: "B" + } + versions { + producer: 21 + } + """ graph_def_a = GraphDef() text_format.Merge( """ - node { - name: "X" - op: "Input" - } - node { - name: "W" - op: "Input" - } - node { - name: "Y" - op: "MatMul" - input: "X" - input: "W" - } - versions { - producer: 21 - } - """, + node { + name: "X" + op: "Input" + } + node { + name: "W" + op: "Input" + } + node { + name: "Y" + op: "MatMul" + input: "X" + input: "W" + } + versions { + producer: 21 + } + """, graph_def_a, ) graph_def_b = GraphDef() text_format.Merge( """ - node { - name: "A" - op: "Input" - } - node { - name: "B" - op: "Input" - } - node { - name: "C" - op: "MatMul" - input: "A" - input: "B" - } - versions { - producer: 21 - } - """, + node { + name: "A" + op: "Input" + } + node { + name: "B" + op: "Input" + } + node { + name: "C" + op: "MatMul" + input: "A" + input: "B" + } + versions { + producer: 21 + } + """, graph_def_b, ) @@ -111,68 +111,68 @@ def test_combine_graph_defs(self): def test_combine_graph_defs_name_collided_but_same_content(self): expected_proto = """ - node { - name: "X" - op: "Input" - } - node { - name: "W" - op: "Input" - } - node { - name: "Y" - op: "MatMul" - input: "X" - input: "W" - } - node { - name: "A" - op: "Input" - } - versions { - producer: 21 - } - """ + node { + name: "X" + op: "Input" + } + node { + name: "W" + op: "Input" + } + node { + name: "Y" + op: "MatMul" + input: "X" + input: "W" + } + node { + name: "A" + op: "Input" + } + versions { + producer: 21 + } + """ graph_def_a = GraphDef() text_format.Merge( """ - node { - name: "X" - op: "Input" - } - node { - name: "W" - op: "Input" - } - node { - name: "Y" - op: "MatMul" - input: "X" - input: "W" - } - versions { - producer: 21 - } - """, + node { + name: "X" + op: "Input" + } + node { + name: "W" + op: "Input" + } + node { + name: "Y" + op: "MatMul" + input: "X" + input: "W" + } + versions { + producer: 21 + } + """, graph_def_a, ) graph_def_b = GraphDef() text_format.Merge( """ - node { - name: "X" - op: "Input" - } - node { - name: "A" - op: "Input" - } - versions { - producer: 21 - } - """, + node { + name: "X" + op: "Input" + } + node { + name: "A" + op: "Input" + } + versions { + producer: 21 + } + """, graph_def_b, ) @@ -185,49 +185,49 @@ def test_combine_graph_defs_name_collided_different_content(self): graph_def_a = GraphDef() text_format.Merge( """ - node { - name: "X" - op: "Input" - } - node { - name: "W" - op: "Input" - } - node { - name: "Y" - op: "MatMul" - input: "X" - input: "W" - } - 
versions { - producer: 21 - } - """, + node { + name: "X" + op: "Input" + } + node { + name: "W" + op: "Input" + } + node { + name: "Y" + op: "MatMul" + input: "X" + input: "W" + } + versions { + producer: 21 + } + """, graph_def_a, ) graph_def_b = GraphDef() text_format.Merge( """ - node { - name: "X" - op: "Input" - device: "cpu:0" - } - node { - name: "Z" - op: "Input" - } - node { - name: "Q" - op: "MatMul" - input: "X" - input: "Z" - } - versions { - producer: 21 - } - """, + node { + name: "X" + op: "Input" + device: "cpu:0" + } + node { + name: "Z" + op: "Input" + } + node { + name: "Q" + op: "MatMul" + input: "X" + input: "Z" + } + versions { + producer: 21 + } + """, graph_def_b, ) @@ -245,36 +245,36 @@ def test_combine_graph_defs_dst_nodes_duplicate_keys(self): graph_def_a = GraphDef() text_format.Merge( """ - node { - name: "X" - op: "Input" - } - node { - name: "X" - op: "Input" - } - versions { - producer: 21 - } - """, + node { + name: "X" + op: "Input" + } + node { + name: "X" + op: "Input" + } + versions { + producer: 21 + } + """, graph_def_a, ) graph_def_b = GraphDef() text_format.Merge( """ - node { - name: "X" - op: "Input" - } - node { - name: "Z" - op: "Input" - } - versions { - producer: 21 - } - """, + node { + name: "X" + op: "Input" + } + node { + name: "Z" + op: "Input" + } + versions { + producer: 21 + } + """, graph_def_b, ) @@ -287,37 +287,37 @@ def test_combine_graph_defs_src_nodes_duplicate_keys(self): graph_def_a = GraphDef() text_format.Merge( """ - node { - name: "X" - op: "Input" - } - node { - name: "Y" - op: "Input" - } - versions { - producer: 21 - } - """, + node { + name: "X" + op: "Input" + } + node { + name: "Y" + op: "Input" + } + versions { + producer: 21 + } + """, graph_def_a, ) graph_def_b = GraphDef() text_format.Merge( """ - node { - name: "W" - op: "Input" - device: "cpu:0" - } - node { - name: "W" - op: "Input" - } - versions { - producer: 21 - } - """, + node { + name: "W" + op: "Input" + device: "cpu:0" + } + node { + name: "W" + op: "Input" + } + versions { + producer: 21 + } + """, graph_def_b, ) @@ -328,120 +328,120 @@ def test_combine_graph_defs_src_nodes_duplicate_keys(self): def test_combine_graph_defs_function(self): expected_proto = """ - library { - function { - signature { - name: "foo" - input_arg { - name: "x" - type: DT_HALF - } - output_arg { - name: "identity" - type: DT_HALF - } - } - node_def { - name: "add" - op: "Add" - input: "x" - input: "y" - } - } - function { - signature { - name: "foo_1" - input_arg { - name: "x" - type: DT_HALF - } - output_arg { - name: "identity" - type: DT_HALF + library { + function { + signature { + name: "foo" + input_arg { + name: "x" + type: DT_HALF + } + output_arg { + name: "identity" + type: DT_HALF + } + } + node_def { + name: "add" + op: "Add" + input: "x" + input: "y" + } + } + function { + signature { + name: "foo_1" + input_arg { + name: "x" + type: DT_HALF + } + output_arg { + name: "identity" + type: DT_HALF + } + } + node_def { + name: "add" + op: "Add" + input: "x" + input: "y" + } + } } - } - node_def { - name: "add" - op: "Add" - input: "x" - input: "y" - } - } - } - """ + """ graph_def_a = GraphDef() text_format.Merge( """ - library { - function { - signature { - name: "foo" - input_arg { - name: "x" - type: DT_HALF - } - output_arg { - name: "identity" - type: DT_HALF - } - } - node_def { - name: "add" - op: "Add" - input: "x" - input: "y" - } - } - } - """, + library { + function { + signature { + name: "foo" + input_arg { + name: "x" + type: DT_HALF + } + output_arg { 
+ name: "identity" + type: DT_HALF + } + } + node_def { + name: "add" + op: "Add" + input: "x" + input: "y" + } + } + } + """, graph_def_a, ) graph_def_b = GraphDef() text_format.Merge( """ - library { - function { - signature { - name: "foo" - input_arg { - name: "x" - type: DT_HALF - } - output_arg { - name: "identity" - type: DT_HALF - } - } - node_def { - name: "add" - op: "Add" - input: "x" - input: "y" - } - } - function { - signature { - name: "foo_1" - input_arg { - name: "x" - type: DT_HALF - } - output_arg { - name: "identity" - type: DT_HALF - } - } - node_def { - name: "add" - op: "Add" - input: "x" - input: "y" - } - } - } - """, + library { + function { + signature { + name: "foo" + input_arg { + name: "x" + type: DT_HALF + } + output_arg { + name: "identity" + type: DT_HALF + } + } + node_def { + name: "add" + op: "Add" + input: "x" + input: "y" + } + } + function { + signature { + name: "foo_1" + input_arg { + name: "x" + type: DT_HALF + } + output_arg { + name: "identity" + type: DT_HALF + } + } + node_def { + name: "add" + op: "Add" + input: "x" + input: "y" + } + } + } + """, graph_def_b, ) @@ -454,75 +454,75 @@ def test_combine_graph_defs_function_collison(self): graph_def_a = GraphDef() text_format.Merge( """ - library { - function { - signature { - name: "foo" - input_arg { - name: "x" - type: DT_HALF - } - output_arg { - name: "identity" - type: DT_HALF - } - } - node_def { - name: "add" - op: "Add" - input: "x" - input: "y" - } - } - } - """, + library { + function { + signature { + name: "foo" + input_arg { + name: "x" + type: DT_HALF + } + output_arg { + name: "identity" + type: DT_HALF + } + } + node_def { + name: "add" + op: "Add" + input: "x" + input: "y" + } + } + } + """, graph_def_a, ) graph_def_b = GraphDef() text_format.Merge( """ - library { - function { - signature { - name: "foo" - input_arg { - name: "x" - type: DT_HALF - } - output_arg { - name: "identity" - type: DT_HALF - } - } - node_def { - name: "div" - op: "Div" - input: "x" - input: "y" - } - } - function { - signature { - name: "foo_1" - input_arg { - name: "x" - type: DT_HALF - } - output_arg { - name: "identity" - type: DT_HALF - } - } - node_def { - name: "add" - op: "Add" - input: "x" - input: "y" - } - } - } - """, + library { + function { + signature { + name: "foo" + input_arg { + name: "x" + type: DT_HALF + } + output_arg { + name: "identity" + type: DT_HALF + } + } + node_def { + name: "div" + op: "Div" + input: "x" + input: "y" + } + } + function { + signature { + name: "foo_1" + input_arg { + name: "x" + type: DT_HALF + } + output_arg { + name: "identity" + type: DT_HALF + } + } + node_def { + name: "add" + op: "Add" + input: "x" + input: "y" + } + } + } + """, graph_def_b, ) @@ -540,69 +540,69 @@ def test_combine_graph_defs_dst_function_duplicate_keys(self): graph_def_a = GraphDef() text_format.Merge( """ - library { - function { - signature { - name: "foo" - input_arg { - name: "x" - type: DT_HALF - } - output_arg { - name: "identity" - type: DT_HALF - } - } - node_def { - name: "add" - op: "Add" - input: "x" - input: "y" - } - } - function { - signature { - name: "foo" - input_arg { - name: "y" - type: DT_HALF - } - output_arg { - name: "identity" - type: DT_HALF - } - } - } - } - """, + library { + function { + signature { + name: "foo" + input_arg { + name: "x" + type: DT_HALF + } + output_arg { + name: "identity" + type: DT_HALF + } + } + node_def { + name: "add" + op: "Add" + input: "x" + input: "y" + } + } + function { + signature { + name: "foo" + input_arg { + name: "y" + 
type: DT_HALF + } + output_arg { + name: "identity" + type: DT_HALF + } + } + } + } + """, graph_def_a, ) graph_def_b = GraphDef() text_format.Merge( """ - library { - function { - signature { - name: "bar" - input_arg { - name: "x" - type: DT_HALF - } - output_arg { - name: "identity" - type: DT_HALF - } - } - node_def { - name: "div" - op: "Div" - input: "x" - input: "y" - } - } - } - """, + library { + function { + signature { + name: "bar" + input_arg { + name: "x" + type: DT_HALF + } + output_arg { + name: "identity" + type: DT_HALF + } + } + node_def { + name: "div" + op: "Div" + input: "x" + input: "y" + } + } + } + """, graph_def_b, ) @@ -617,63 +617,63 @@ def test_combine_graph_defs_src_function_duplicate_keys(self): graph_def_a = GraphDef() text_format.Merge( """ - library { - function { - signature { - name: "foo" - input_arg { - name: "x" - type: DT_HALF - } - output_arg { - name: "identity" - type: DT_HALF - } - } - node_def { - name: "add" - op: "Add" - input: "x" - input: "y" - } - } - } - """, + library { + function { + signature { + name: "foo" + input_arg { + name: "x" + type: DT_HALF + } + output_arg { + name: "identity" + type: DT_HALF + } + } + node_def { + name: "add" + op: "Add" + input: "x" + input: "y" + } + } + } + """, graph_def_a, ) graph_def_b = GraphDef() text_format.Merge( """ - library { - function { - signature { - name: "bar" - input_arg { - name: "x" - type: DT_HALF - } - output_arg { - name: "identity" - type: DT_HALF - } - } - } - function { - signature { - name: "bar" - input_arg { - name: "y" - type: DT_HALF - } - output_arg { - name: "identity" - type: DT_HALF - } - } - } - } - """, + library { + function { + signature { + name: "bar" + input_arg { + name: "x" + type: DT_HALF + } + output_arg { + name: "identity" + type: DT_HALF + } + } + } + function { + signature { + name: "bar" + input_arg { + name: "y" + type: DT_HALF + } + output_arg { + name: "identity" + type: DT_HALF + } + } + } + } + """, graph_def_b, ) @@ -686,45 +686,45 @@ def test_combine_graph_defs_src_function_duplicate_keys(self): def test_combine_graph_defs_gradient(self): expected_proto = """ - library { - gradient { - function_name: "foo" - gradient_func: "foo_grad" - } - gradient { - function_name: "bar" - gradient_func: "bar_grad" - } - } - """ + library { + gradient { + function_name: "foo" + gradient_func: "foo_grad" + } + gradient { + function_name: "bar" + gradient_func: "bar_grad" + } + } + """ graph_def_a = GraphDef() text_format.Merge( """ - library { - gradient { - function_name: "foo" - gradient_func: "foo_grad" - } - } - """, + library { + gradient { + function_name: "foo" + gradient_func: "foo_grad" + } + } + """, graph_def_a, ) graph_def_b = GraphDef() text_format.Merge( """ - library { - gradient { - function_name: "foo" - gradient_func: "foo_grad" - } - gradient { - function_name: "bar" - gradient_func: "bar_grad" - } - } - """, + library { + gradient { + function_name: "foo" + gradient_func: "foo_grad" + } + gradient { + function_name: "bar" + gradient_func: "bar_grad" + } + } + """, graph_def_b, ) @@ -737,30 +737,30 @@ def test_combine_graph_defs_gradient_collison(self): graph_def_a = GraphDef() text_format.Merge( """ - library { - gradient { - function_name: "foo" - gradient_func: "foo_grad" - } - } - """, + library { + gradient { + function_name: "foo" + gradient_func: "foo_grad" + } + } + """, graph_def_a, ) graph_def_b = GraphDef() text_format.Merge( """ - library { - gradient { - function_name: "bar" - gradient_func: "bar_grad" - } - gradient { - 
function_name: "foo_1" - gradient_func: "foo_grad" - } - } - """, + library { + gradient { + function_name: "bar" + gradient_func: "bar_grad" + } + gradient { + function_name: "foo_1" + gradient_func: "foo_grad" + } + } + """, graph_def_b, ) @@ -778,30 +778,30 @@ def test_combine_graph_defs_dst_gradient_func_non_unique(self): graph_def_a = GraphDef() text_format.Merge( """ - library { - gradient { - function_name: "foo" - gradient_func: "foo_grad" - } - gradient { - function_name: "foo_bar" - gradient_func: "foo_grad" - } - } - """, + library { + gradient { + function_name: "foo" + gradient_func: "foo_grad" + } + gradient { + function_name: "foo_bar" + gradient_func: "foo_grad" + } + } + """, graph_def_a, ) graph_def_b = GraphDef() text_format.Merge( """ - library { - gradient { - function_name: "bar" - gradient_func: "bar_grad" - } - } - """, + library { + gradient { + function_name: "bar" + gradient_func: "bar_grad" + } + } + """, graph_def_b, ) @@ -816,30 +816,30 @@ def test_combine_graph_defs_src_gradient_func_non_unique(self): graph_def_a = GraphDef() text_format.Merge( """ - library { - gradient { - function_name: "foo" - gradient_func: "foo_grad" - } - } - """, + library { + gradient { + function_name: "foo" + gradient_func: "foo_grad" + } + } + """, graph_def_a, ) graph_def_b = GraphDef() text_format.Merge( """ - library { - gradient { - function_name: "bar" - gradient_func: "bar_grad" - } - gradient { - function_name: "bar_baz" - gradient_func: "bar_grad" - } - } - """, + library { + gradient { + function_name: "bar" + gradient_func: "bar_grad" + } + gradient { + function_name: "bar_baz" + gradient_func: "bar_grad" + } + } + """, graph_def_b, ) diff --git a/tensorboard/plugins/graph/keras_util.py b/tensorboard/plugins/graph/keras_util.py index 646569361e..30f307a2ce 100644 --- a/tensorboard/plugins/graph/keras_util.py +++ b/tensorboard/plugins/graph/keras_util.py @@ -47,14 +47,15 @@ def _walk_layers(keras_layer): """Walks the nested keras layer configuration in preorder. - Args: - keras_layer: Keras configuration from model.to_json. - - Yields: - A tuple of (name_scope, layer_config). - name_scope: a string representing a scope name, similar to that of tf.name_scope. - layer_config: a dict representing a Keras layer configuration. - """ + + Args: + keras_layer: Keras configuration from model.to_json. + + Yields: + A tuple of (name_scope, layer_config). + name_scope: a string representing a scope name, similar to that of tf.name_scope. + layer_config: a dict representing a Keras layer configuration. 
+ """ yield ("", keras_layer) if keras_layer.get("config").get("layers"): name_scope = keras_layer.get("config").get("name") diff --git a/tensorboard/plugins/graph/keras_util_test.py b/tensorboard/plugins/graph/keras_util_test.py index 669fafed81..aefae88f5f 100644 --- a/tensorboard/plugins/graph/keras_util_test.py +++ b/tensorboard/plugins/graph/keras_util_test.py @@ -37,70 +37,70 @@ def assertGraphDefToModel(self, expected_proto, model): def test_keras_model_to_graph_def_sequential_model(self): expected_proto = """ - node { - name: "sequential/dense" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Dense" - } - } - } - node { - name: "sequential/my_relu" - input: "sequential/dense" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Activation" - } - } - } - node { - name: "sequential/dense_1" - input: "sequential/my_relu" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Dense" - } - } - } - node { - name: "sequential/activation" - input: "sequential/dense_1" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Activation" - } - } - } - """ + node { + name: "sequential/dense" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Dense" + } + } + } + node { + name: "sequential/my_relu" + input: "sequential/dense" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Activation" + } + } + } + node { + name: "sequential/dense_1" + input: "sequential/my_relu" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Dense" + } + } + } + node { + name: "sequential/activation" + input: "sequential/dense_1" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Activation" + } + } + } + """ model = tf.keras.models.Sequential( [ tf.keras.layers.Dense(32, input_shape=(784,)), @@ -113,70 +113,70 @@ def test_keras_model_to_graph_def_sequential_model(self): def test_keras_model_to_graph_def_functional_model(self): expected_proto = """ - node { - name: "model/functional_input" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "InputLayer" - } - } - } - node { - name: "model/dense" - input: "model/functional_input" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Dense" - } - } - } - node { - name: "model/dense_1" - input: "model/dense" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Dense" - } - } - } - node { - name: "model/dense_2" - input: "model/dense_1" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Dense" - } - } - } - """ + node { + name: "model/functional_input" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "InputLayer" + } + } + } + node { + name: "model/dense" + input: "model/functional_input" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Dense" + } + } + } + node { + name: "model/dense_1" + input: "model/dense" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Dense" + } + } + } + node { + 
name: "model/dense_2" + input: "model/dense_1" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Dense" + } + } + } + """ inputs = tf.keras.layers.Input(shape=(784,), name="functional_input") d0 = tf.keras.layers.Dense(64, activation="relu") d1 = tf.keras.layers.Dense(64, activation="relu") @@ -187,71 +187,71 @@ def test_keras_model_to_graph_def_functional_model(self): def test_keras_model_to_graph_def_functional_model_with_cycle(self): expected_proto = """ - node { - name: "model/cycle_input" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "InputLayer" - } - } - } - node { - name: "model/dense" - input: "model/cycle_input" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Dense" - } - } - } - node { - name: "model/dense_1" - input: "model/dense" - input: "model/dense_2" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Dense" - } - } - } - node { - name: "model/dense_2" - input: "model/dense_1" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Dense" - } - } - } - """ + node { + name: "model/cycle_input" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "InputLayer" + } + } + } + node { + name: "model/dense" + input: "model/cycle_input" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Dense" + } + } + } + node { + name: "model/dense_1" + input: "model/dense" + input: "model/dense_2" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Dense" + } + } + } + node { + name: "model/dense_2" + input: "model/dense_1" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Dense" + } + } + } + """ inputs = tf.keras.layers.Input(shape=(784,), name="cycle_input") d0 = tf.keras.layers.Dense(64, activation="relu") d1 = tf.keras.layers.Dense(64, activation="relu") @@ -264,38 +264,38 @@ def test_keras_model_to_graph_def_functional_model_with_cycle(self): def test_keras_model_to_graph_def_lstm_model(self): expected_proto = """ - node { - name: "model/lstm_input" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "InputLayer" - } - } - } - node { - name: "model/simple_rnn" - input: "model/lstm_input" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "SimpleRNN" - } - } - } - """ + node { + name: "model/lstm_input" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "InputLayer" + } + } + } + node { + name: "model/simple_rnn" + input: "model/lstm_input" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "SimpleRNN" + } + } + } + """ inputs = tf.keras.layers.Input(shape=(None, 5), name="lstm_input") encoder = tf.keras.layers.SimpleRNN(256) @@ -304,86 +304,86 @@ def test_keras_model_to_graph_def_lstm_model(self): def test_keras_model_to_graph_def_nested_sequential_model(self): expected_proto = """ - node { - name: "sequential_2/sequential_1/sequential/dense" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Dense" - } - } - } - node { - name: 
"sequential_2/sequential_1/sequential/activation" - input: "sequential_2/sequential_1/sequential/dense" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Activation" - } - } - } - node { - name: "sequential_2/sequential_1/my_relu" - input: "sequential_2/sequential_1/sequential/activation" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Activation" - } - } - } - node { - name: "sequential_2/dense_1" - input: "sequential_2/sequential_1/my_relu" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Dense" - } - } - } - node { - name: "sequential_2/activation_1" - input: "sequential_2/dense_1" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Activation" - } - } - } - """ + node { + name: "sequential_2/sequential_1/sequential/dense" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Dense" + } + } + } + node { + name: "sequential_2/sequential_1/sequential/activation" + input: "sequential_2/sequential_1/sequential/dense" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Activation" + } + } + } + node { + name: "sequential_2/sequential_1/my_relu" + input: "sequential_2/sequential_1/sequential/activation" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Activation" + } + } + } + node { + name: "sequential_2/dense_1" + input: "sequential_2/sequential_1/my_relu" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Dense" + } + } + } + node { + name: "sequential_2/activation_1" + input: "sequential_2/dense_1" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Activation" + } + } + } + """ sub_sub_model = tf.keras.models.Sequential( [ tf.keras.layers.Dense(32, input_shape=(784,)), @@ -407,134 +407,134 @@ def test_keras_model_to_graph_def_nested_sequential_model(self): def test_keras_model_to_graph_def_functional_multi_inputs(self): expected_proto = """ - node { - name: "model/main_input" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "keras_class" - value { - s: "InputLayer" - } - } - } - node { - name: "model/embedding" - input: "model/main_input" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Embedding" - } - } - } - node { - name: "model/simple_rnn" - input: "model/embedding" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "SimpleRNN" - } - } - } - node { - name: "model/aux_input" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "InputLayer" - } - } - } - node { - name: "model/concatenate" - input: "model/simple_rnn" - input: "model/aux_input" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Concatenate" - } - } - } - node { - name: "model/dense" - input: "model/concatenate" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Dense" - } - } - } - node { - name: "model/main_output" - input: "model/dense" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Dense" - } 
- } - } - node { - name: "model/aux_output" - input: "model/simple_rnn" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Dense" - } - } - } - """ + node { + name: "model/main_input" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "keras_class" + value { + s: "InputLayer" + } + } + } + node { + name: "model/embedding" + input: "model/main_input" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Embedding" + } + } + } + node { + name: "model/simple_rnn" + input: "model/embedding" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "SimpleRNN" + } + } + } + node { + name: "model/aux_input" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "InputLayer" + } + } + } + node { + name: "model/concatenate" + input: "model/simple_rnn" + input: "model/aux_input" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Concatenate" + } + } + } + node { + name: "model/dense" + input: "model/concatenate" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Dense" + } + } + } + node { + name: "model/main_output" + input: "model/dense" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Dense" + } + } + } + node { + name: "model/aux_output" + input: "model/simple_rnn" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Dense" + } + } + } + """ main_input = tf.keras.layers.Input( shape=(100,), dtype="int32", name="main_input" ) @@ -564,132 +564,132 @@ def test_keras_model_to_graph_def_functional_multi_inputs(self): def test_keras_model_to_graph_def_functional_model_as_layer(self): expected_proto = """ - node { - name: "model_1/sub_func_input_2" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "InputLayer" - } - } - } - node { - name: "model_1/sub_func_input_1" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "InputLayer" - } - } - } - node { - name: "model_1/model/sub_func_input_1" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "InputLayer" - } - } - } - node { - name: "model_1/model/sub_func_input_2" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "InputLayer" - } - } - } - node { - name: "model_1/model/dense" - input: "model_1/model/sub_func_input_1" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Dense" - } - } - } - node { - name: "model_1/model/dense_1" - input: "model_1/model/sub_func_input_2" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Dense" - } - } - } - node { - name: "model_1/concatenate" - input: "model_1/model/dense" - input: "model_1/model/dense_1" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Concatenate" - } - } - } - node { - name: "model_1/dense_2" - input: "model_1/concatenate" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Dense" - } - } - } - """ + node { + name: "model_1/sub_func_input_2" + attr 
{ + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "InputLayer" + } + } + } + node { + name: "model_1/sub_func_input_1" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "InputLayer" + } + } + } + node { + name: "model_1/model/sub_func_input_1" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "InputLayer" + } + } + } + node { + name: "model_1/model/sub_func_input_2" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "InputLayer" + } + } + } + node { + name: "model_1/model/dense" + input: "model_1/model/sub_func_input_1" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Dense" + } + } + } + node { + name: "model_1/model/dense_1" + input: "model_1/model/sub_func_input_2" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Dense" + } + } + } + node { + name: "model_1/concatenate" + input: "model_1/model/dense" + input: "model_1/model/dense_1" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Concatenate" + } + } + } + node { + name: "model_1/dense_2" + input: "model_1/concatenate" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Dense" + } + } + } + """ inputs1 = tf.keras.layers.Input(shape=(784,), name="sub_func_input_1") inputs2 = tf.keras.layers.Input(shape=(784,), name="sub_func_input_2") d0 = tf.keras.layers.Dense(64, activation="relu") @@ -711,70 +711,70 @@ def test_keras_model_to_graph_def_functional_model_as_layer(self): def test_keras_model_to_graph_def_functional_sequential_model(self): expected_proto = """ - node { - name: "model/func_seq_input" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "InputLayer" - } - } - } - node { - name: "model/sequential/dense" - input: "model/func_seq_input" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Dense" - } - } - } - node { - name: "model/sequential/my_relu" - input: "model/sequential/dense" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Activation" - } - } - } - node { - name: "model/dense_1" - input: "model/sequential/my_relu" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Dense" - } - } - } - """ + node { + name: "model/func_seq_input" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "InputLayer" + } + } + } + node { + name: "model/sequential/dense" + input: "model/func_seq_input" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Dense" + } + } + } + node { + name: "model/sequential/my_relu" + input: "model/sequential/dense" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Activation" + } + } + } + node { + name: "model/dense_1" + input: "model/sequential/my_relu" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Dense" + } + } + } + """ inputs = tf.keras.layers.Input(shape=(784,), name="func_seq_input") sub_model = tf.keras.models.Sequential( [ @@ -792,70 +792,70 @@ def 
test_keras_model_to_graph_def_functional_sequential_model(self): def test_keras_model_to_graph_def_sequential_functional_model(self): expected_proto = """ - node { - name: "sequential/model/func_seq_input" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "InputLayer" - } - } - } - node { - name: "sequential/model/dense" - input: "sequential/model/func_seq_input" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Dense" - } - } - } - node { - name: "sequential/dense_1" - input: "sequential/model/dense" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Dense" - } - } - } - node { - name: "sequential/my_relu" - input: "sequential/dense_1" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "keras_class" - value { - s: "Activation" - } - } - } - """ + node { + name: "sequential/model/func_seq_input" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "InputLayer" + } + } + } + node { + name: "sequential/model/dense" + input: "sequential/model/func_seq_input" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Dense" + } + } + } + node { + name: "sequential/dense_1" + input: "sequential/model/dense" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Dense" + } + } + } + node { + name: "sequential/my_relu" + input: "sequential/dense_1" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "keras_class" + value { + s: "Activation" + } + } + } + """ inputs = tf.keras.layers.Input(shape=(784,), name="func_seq_input") dense = tf.keras.layers.Dense(64, activation="relu") diff --git a/tensorboard/plugins/histogram/histograms_plugin.py b/tensorboard/plugins/histogram/histograms_plugin.py index 201521a30e..b3367d2e56 100644 --- a/tensorboard/plugins/histogram/histograms_plugin.py +++ b/tensorboard/plugins/histogram/histograms_plugin.py @@ -86,12 +86,12 @@ def is_active(self): db = self._db_connection_provider() cursor = db.execute( """ - SELECT - 1 - FROM Tags - WHERE Tags.plugin_name = ? - LIMIT 1 - """, + SELECT + 1 + FROM Tags + WHERE Tags.plugin_name = ? + LIMIT 1 + """, (metadata.PLUGIN_NAME,), ) return bool(list(cursor)) @@ -125,16 +125,16 @@ def index_impl(self, experiment): db = self._db_connection_provider() cursor = db.execute( """ - SELECT - Tags.tag_name, - Tags.display_name, - Runs.run_name - FROM Tags - JOIN Runs - ON Tags.run_id = Runs.run_id - WHERE - Tags.plugin_name = ? - """, + SELECT + Tags.tag_name, + Tags.display_name, + Runs.run_name + FROM Tags + JOIN Runs + ON Tags.run_id = Runs.run_id + WHERE + Tags.plugin_name = ? + """, (metadata.PLUGIN_NAME,), ) result = collections.defaultdict(dict) @@ -213,15 +213,15 @@ def histograms_impl(self, tag, run, experiment, downsample_to=None): # Prefetch the tag ID matching this run and tag. 
cursor.execute( """ - SELECT - tag_id - FROM Tags - JOIN Runs USING (run_id) - WHERE - Runs.run_name = :run - AND Tags.tag_name = :tag - AND Tags.plugin_name = :plugin - """, + SELECT + tag_id + FROM Tags + JOIN Runs USING (run_id) + WHERE + Runs.run_name = :run + AND Tags.tag_name = :tag + AND Tags.plugin_name = :plugin + """, {"run": run, "tag": tag, "plugin": metadata.PLUGIN_NAME}, ) row = cursor.fetchone() @@ -240,30 +240,30 @@ def histograms_impl(self, tag, run, experiment, downsample_to=None): # [s_min + math.ceil(i / k * (s_max - s_min)) for i in range(0, k + 1)] cursor.execute( """ - SELECT - MIN(step) AS step, - computed_time, - data, - dtype, - shape - FROM Tensors - INNER JOIN ( - SELECT - MIN(step) AS min_step, - MAX(step) AS max_step - FROM Tensors - /* Filter out NULL so we can use TensorSeriesStepIndex. */ - WHERE series = :tag_id AND step IS NOT NULL - ) - /* Ensure we omit reserved rows, which have NULL step values. */ - WHERE series = :tag_id AND step IS NOT NULL - /* Bucket rows into sample_size linearly spaced buckets, or do - no sampling if sample_size is NULL. */ - GROUP BY - IFNULL(:sample_size - 1, max_step - min_step) - * (step - min_step) / (max_step - min_step) - ORDER BY step - """, + SELECT + MIN(step) AS step, + computed_time, + data, + dtype, + shape + FROM Tensors + INNER JOIN ( + SELECT + MIN(step) AS min_step, + MAX(step) AS max_step + FROM Tensors + /* Filter out NULL so we can use TensorSeriesStepIndex. */ + WHERE series = :tag_id AND step IS NOT NULL + ) + /* Ensure we omit reserved rows, which have NULL step values. */ + WHERE series = :tag_id AND step IS NOT NULL + /* Bucket rows into sample_size linearly spaced buckets, or do + no sampling if sample_size is NULL. */ + GROUP BY + IFNULL(:sample_size - 1, max_step - min_step) + * (step - min_step) / (max_step - min_step) + ORDER BY step + """, {"tag_id": tag_id, "sample_size": downsample_to}, ) events = [ diff --git a/tensorboard/plugins/hparams/backend_context_test.py b/tensorboard/plugins/hparams/backend_context_test.py index 190a5c68f6..b73c60d3af 100644 --- a/tensorboard/plugins/hparams/backend_context_test.py +++ b/tensorboard/plugins/hparams/backend_context_test.py @@ -99,11 +99,11 @@ def _mock_plugin_run_to_tag_to_content(self, plugin_name): def test_experiment_with_experiment_tag(self): experiment = """ - description: 'Test experiment' - metric_infos: [ - { name: { tag: 'current_temp' } } - ] - """ + description: 'Test experiment' + metric_infos: [ + { name: { tag: 'current_temp' } } + ] + """ self._mock_multiplexer.PluginRunToTagToContent.side_effect = None self._mock_multiplexer.PluginRunToTagToContent.return_value = { "exp": { @@ -117,56 +117,56 @@ def test_experiment_with_experiment_tag(self): def test_experiment_without_experiment_tag(self): self.session_1_start_info_ = """ - hparams:[ - {key: 'batch_size' value: {number_value: 100}}, - {key: 'lr' value: {number_value: 0.01}}, - {key: 'model_type' value: {string_value: 'CNN'}} - ] - """ + hparams: [ + {key: 'batch_size' value: {number_value: 100}}, + {key: 'lr' value: {number_value: 0.01}}, + {key: 'model_type' value: {string_value: 'CNN'}} + ] + """ self.session_2_start_info_ = """ - hparams:[ - {key: 'batch_size' value: {number_value: 200}}, - {key: 'lr' value: {number_value: 0.02}}, - {key: 'model_type' value: {string_value: 'LATTICE'}} - ] - """ + hparams:[ + {key: 'batch_size' value: {number_value: 200}}, + {key: 'lr' value: {number_value: 0.02}}, + {key: 'model_type' value: {string_value: 'LATTICE'}} + ] + """ 
self.session_3_start_info_ = """ - hparams:[ - {key: 'batch_size' value: {number_value: 300}}, - {key: 'lr' value: {number_value: 0.05}}, - {key: 'model_type' value: {string_value: 'CNN'}} - ] - """ + hparams:[ + {key: 'batch_size' value: {number_value: 300}}, + {key: 'lr' value: {number_value: 0.05}}, + {key: 'model_type' value: {string_value: 'CNN'}} + ] + """ expected_exp = """ - hparam_infos: { - name: 'batch_size' - type: DATA_TYPE_FLOAT64 - }, - hparam_infos: { - name: 'lr' - type: DATA_TYPE_FLOAT64 - }, - hparam_infos: { - name: 'model_type' - type: DATA_TYPE_STRING - domain_discrete: { - values: [{string_value: 'CNN'}, - {string_value: 'LATTICE'}] - } - } - metric_infos: { - name: {group: '', tag: 'accuracy'} - } - metric_infos: { - name: {group: '', tag: 'loss'} - } - metric_infos: { - name: {group: 'eval', tag: 'loss'} - } - metric_infos: { - name: {group: 'train', tag: 'loss'} - } - """ + hparam_infos: { + name: 'batch_size' + type: DATA_TYPE_FLOAT64 + }, + hparam_infos: { + name: 'lr' + type: DATA_TYPE_FLOAT64 + }, + hparam_infos: { + name: 'model_type' + type: DATA_TYPE_STRING + domain_discrete: { + values: [{string_value: 'CNN'}, + {string_value: 'LATTICE'}] + } + } + metric_infos: { + name: {group: '', tag: 'accuracy'} + } + metric_infos: { + name: {group: '', tag: 'loss'} + } + metric_infos: { + name: {group: 'eval', tag: 'loss'} + } + metric_infos: { + name: {group: 'train', tag: 'loss'} + } + """ ctxt = backend_context.Context(self._mock_tb_context) actual_exp = ctxt.experiment() _canonicalize_experiment(actual_exp) @@ -174,61 +174,61 @@ def test_experiment_without_experiment_tag(self): def test_experiment_without_experiment_tag_different_hparam_types(self): self.session_1_start_info_ = """ - hparams:[ - {key: 'batch_size' value: {number_value: 100}}, - {key: 'lr' value: {string_value: '0.01'}} - ] - """ + hparams:[ + {key: 'batch_size' value: {number_value: 100}}, + {key: 'lr' value: {string_value: '0.01'}} + ] + """ self.session_2_start_info_ = """ - hparams:[ - {key: 'lr' value: {number_value: 0.02}}, - {key: 'model_type' value: {string_value: 'LATTICE'}} - ] - """ + hparams:[ + {key: 'lr' value: {number_value: 0.02}}, + {key: 'model_type' value: {string_value: 'LATTICE'}} + ] + """ self.session_3_start_info_ = """ - hparams:[ - {key: 'batch_size' value: {bool_value: true}}, - {key: 'model_type' value: {string_value: 'CNN'}} - ] - """ + hparams:[ + {key: 'batch_size' value: {bool_value: true}}, + {key: 'model_type' value: {string_value: 'CNN'}} + ] + """ expected_exp = """ - hparam_infos: { - name: 'batch_size' - type: DATA_TYPE_STRING - domain_discrete: { - values: [{string_value: '100.0'}, - {string_value: 'true'}] - } - } - hparam_infos: { - name: 'lr' - type: DATA_TYPE_STRING - domain_discrete: { - values: [{string_value: '0.01'}, - {string_value: '0.02'}] - } - } - hparam_infos: { - name: 'model_type' - type: DATA_TYPE_STRING - domain_discrete: { - values: [{string_value: 'CNN'}, - {string_value: 'LATTICE'}] - } - } - metric_infos: { - name: {group: '', tag: 'accuracy'} - } - metric_infos: { - name: {group: '', tag: 'loss'} - } - metric_infos: { - name: {group: 'eval', tag: 'loss'} - } - metric_infos: { - name: {group: 'train', tag: 'loss'} - } - """ + hparam_infos: { + name: 'batch_size' + type: DATA_TYPE_STRING + domain_discrete: { + values: [{string_value: '100.0'}, + {string_value: 'true'}] + } + } + hparam_infos: { + name: 'lr' + type: DATA_TYPE_STRING + domain_discrete: { + values: [{string_value: '0.01'}, + {string_value: '0.02'}] + } + } + hparam_infos: { 
+ name: 'model_type' + type: DATA_TYPE_STRING + domain_discrete: { + values: [{string_value: 'CNN'}, + {string_value: 'LATTICE'}] + } + } + metric_infos: { + name: {group: '', tag: 'accuracy'} + } + metric_infos: { + name: {group: '', tag: 'loss'} + } + metric_infos: { + name: {group: 'eval', tag: 'loss'} + } + metric_infos: { + name: {group: 'train', tag: 'loss'} + } + """ ctxt = backend_context.Context(self._mock_tb_context) actual_exp = ctxt.experiment() _canonicalize_experiment(actual_exp) @@ -236,52 +236,52 @@ def test_experiment_without_experiment_tag_different_hparam_types(self): def test_experiment_without_experiment_tag_many_distinct_values(self): self.session_1_start_info_ = """ - hparams:[ - {key: 'batch_size' value: {number_value: 100}}, - {key: 'lr' value: {string_value: '0.01'}} - ] - """ + hparams:[ + {key: 'batch_size' value: {number_value: 100}}, + {key: 'lr' value: {string_value: '0.01'}} + ] + """ self.session_2_start_info_ = """ - hparams:[ - {key: 'lr' value: {number_value: 0.02}}, - {key: 'model_type' value: {string_value: 'CNN'}} - ] - """ + hparams:[ + {key: 'lr' value: {number_value: 0.02}}, + {key: 'model_type' value: {string_value: 'CNN'}} + ] + """ self.session_3_start_info_ = """ - hparams:[ - {key: 'batch_size' value: {bool_value: true}}, - {key: 'model_type' value: {string_value: 'CNN'}} - ] - """ + hparams:[ + {key: 'batch_size' value: {bool_value: true}}, + {key: 'model_type' value: {string_value: 'CNN'}} + ] + """ expected_exp = """ - hparam_infos: { - name: 'batch_size' - type: DATA_TYPE_STRING - } - hparam_infos: { - name: 'lr' - type: DATA_TYPE_STRING - } - hparam_infos: { - name: 'model_type' - type: DATA_TYPE_STRING - domain_discrete: { - values: [{string_value: 'CNN'}] - } - } - metric_infos: { - name: {group: '', tag: 'accuracy'} - } - metric_infos: { - name: {group: '', tag: 'loss'} - } - metric_infos: { - name: {group: 'eval', tag: 'loss'} - } - metric_infos: { - name: {group: 'train', tag: 'loss'} - } - """ + hparam_infos: { + name: 'batch_size' + type: DATA_TYPE_STRING + } + hparam_infos: { + name: 'lr' + type: DATA_TYPE_STRING + } + hparam_infos: { + name: 'model_type' + type: DATA_TYPE_STRING + domain_discrete: { + values: [{string_value: 'CNN'}] + } + } + metric_infos: { + name: {group: '', tag: 'accuracy'} + } + metric_infos: { + name: {group: '', tag: 'loss'} + } + metric_infos: { + name: {group: 'eval', tag: 'loss'} + } + metric_infos: { + name: {group: 'train', tag: 'loss'} + } + """ ctxt = backend_context.Context( self._mock_tb_context, max_domain_discrete_len=1 ) diff --git a/tensorboard/plugins/hparams/keras_test.py b/tensorboard/plugins/hparams/keras_test.py index 152611eed7..d7ec6598a9 100644 --- a/tensorboard/plugins/hparams/keras_test.py +++ b/tensorboard/plugins/hparams/keras_test.py @@ -110,21 +110,21 @@ def mock_time(): expected_start_pb = plugin_data_pb2.SessionStartInfo() text_format.Merge( """ - start_time_secs: 1234.5 - group_name: "my_trial" - hparams { - key: "optimizer" - value { - string_value: "adam" - } - } - hparams { - key: "dense_neurons" - value { - number_value: 8.0 - } - } - """, + start_time_secs: 1234.5 + group_name: "my_trial" + hparams { + key: "optimizer" + value { + string_value: "adam" + } + } + hparams { + key: "dense_neurons" + value { + number_value: 8.0 + } + } + """, expected_start_pb, ) self.assertEqual(start_pb, expected_start_pb) @@ -132,9 +132,9 @@ def mock_time(): expected_end_pb = plugin_data_pb2.SessionEndInfo() text_format.Merge( """ - end_time_secs: 6789.0 - status: STATUS_SUCCESS - """, + 
end_time_secs: 6789.0 + status: STATUS_SUCCESS + """, expected_end_pb, ) self.assertEqual(end_pb, expected_end_pb) diff --git a/tensorboard/plugins/hparams/list_metric_evals_test.py b/tensorboard/plugins/hparams/list_metric_evals_test.py index da14f7dea5..8f3d489832 100644 --- a/tensorboard/plugins/hparams/list_metric_evals_test.py +++ b/tensorboard/plugins/hparams/list_metric_evals_test.py @@ -57,11 +57,13 @@ def _run_handler(self, request): def test_run(self): result = self._run_handler( - """session_name: '/this/is/a/session' - metric_name: { - tag: 'metric_tag' - group: 'metric_group' - }""" + """ + session_name: '/this/is/a/session' + metric_name: { + tag: 'metric_tag' + group: 'metric_group' + } + """ ) self.assertEqual([(1, 1, 1.0), (2, 2, 2.0), (3, 3, 3.0)], result) diff --git a/tensorboard/plugins/hparams/list_session_groups_test.py b/tensorboard/plugins/hparams/list_session_groups_test.py index 800ad4d259..8c58e5bde6 100644 --- a/tensorboard/plugins/hparams/list_session_groups_test.py +++ b/tensorboard/plugins/hparams/list_session_groups_test.py @@ -63,140 +63,140 @@ def setUp(self): metadata.EXPERIMENT_TAG: self._serialized_plugin_data( DATA_TYPE_EXPERIMENT, """ - description: 'Test experiment' - user: 'Test user' - hparam_infos: [ - { - name: 'initial_temp' - type: DATA_TYPE_FLOAT64 - }, - { - name: 'final_temp' - type: DATA_TYPE_FLOAT64 - }, - { name: 'string_hparam' }, - { name: 'bool_hparam' }, - { name: 'optional_string_hparam' } - ] - metric_infos: [ - { name: { tag: 'current_temp' } }, - { name: { tag: 'delta_temp' } }, - { name: { tag: 'optional_metric' } } - ] - """, + description: 'Test experiment' + user: 'Test user' + hparam_infos: [ + { + name: 'initial_temp' + type: DATA_TYPE_FLOAT64 + }, + { + name: 'final_temp' + type: DATA_TYPE_FLOAT64 + }, + { name: 'string_hparam' }, + { name: 'bool_hparam' }, + { name: 'optional_string_hparam' } + ] + metric_infos: [ + { name: { tag: 'current_temp' } }, + { name: { tag: 'delta_temp' } }, + { name: { tag: 'optional_metric' } } + ] + """, ) }, "session_1": { metadata.SESSION_START_INFO_TAG: self._serialized_plugin_data( DATA_TYPE_SESSION_START_INFO, """ - hparams:{ key: 'initial_temp' value: { number_value: 270 } }, - hparams:{ key: 'final_temp' value: { number_value: 150 } }, - hparams:{ - key: 'string_hparam' value: { string_value: 'a string' } - }, - hparams:{ key: 'bool_hparam' value: { bool_value: true } } - group_name: 'group_1' - start_time_secs: 314159 - """, + hparams:{ key: 'initial_temp' value: { number_value: 270 } }, + hparams:{ key: 'final_temp' value: { number_value: 150 } }, + hparams:{ + key: 'string_hparam' value: { string_value: 'a string' } + }, + hparams:{ key: 'bool_hparam' value: { bool_value: true } } + group_name: 'group_1' + start_time_secs: 314159 + """, ), metadata.SESSION_END_INFO_TAG: self._serialized_plugin_data( DATA_TYPE_SESSION_END_INFO, """ - status: STATUS_SUCCESS - end_time_secs: 314164 - """, + status: STATUS_SUCCESS + end_time_secs: 314164 + """, ), }, "session_2": { metadata.SESSION_START_INFO_TAG: self._serialized_plugin_data( DATA_TYPE_SESSION_START_INFO, """ - hparams:{ key: 'initial_temp' value: { number_value: 280 } }, - hparams:{ key: 'final_temp' value: { number_value: 100 } }, - hparams:{ - key: 'string_hparam' value: { string_value: 'AAAAA' } - }, - hparams:{ key: 'bool_hparam' value: { bool_value: false } } - group_name: 'group_2' - start_time_secs: 314159 - """, + hparams:{ key: 'initial_temp' value: { number_value: 280 } }, + hparams:{ key: 'final_temp' value: { number_value: 
100 } }, + hparams:{ + key: 'string_hparam' value: { string_value: 'AAAAA' } + }, + hparams:{ key: 'bool_hparam' value: { bool_value: false } } + group_name: 'group_2' + start_time_secs: 314159 + """, ), metadata.SESSION_END_INFO_TAG: self._serialized_plugin_data( DATA_TYPE_SESSION_END_INFO, """ - status: STATUS_SUCCESS - end_time_secs: 314164 - """, + status: STATUS_SUCCESS + end_time_secs: 314164 + """, ), }, "session_3": { metadata.SESSION_START_INFO_TAG: self._serialized_plugin_data( DATA_TYPE_SESSION_START_INFO, """ - hparams:{ key: 'initial_temp' value: { number_value: 280 } }, - hparams:{ key: 'final_temp' value: { number_value: 100 } }, - hparams:{ - key: 'string_hparam' value: { string_value: 'AAAAA' } - }, - hparams:{ key: 'bool_hparam' value: { bool_value: false } } - group_name: 'group_2' - start_time_secs: 314159 - """, + hparams:{ key: 'initial_temp' value: { number_value: 280 } }, + hparams:{ key: 'final_temp' value: { number_value: 100 } }, + hparams:{ + key: 'string_hparam' value: { string_value: 'AAAAA' } + }, + hparams:{ key: 'bool_hparam' value: { bool_value: false } } + group_name: 'group_2' + start_time_secs: 314159 + """, ), metadata.SESSION_END_INFO_TAG: self._serialized_plugin_data( DATA_TYPE_SESSION_END_INFO, """ - status: STATUS_FAILURE - end_time_secs: 314164 - """, + status: STATUS_FAILURE + end_time_secs: 314164 + """, ), }, "session_4": { metadata.SESSION_START_INFO_TAG: self._serialized_plugin_data( DATA_TYPE_SESSION_START_INFO, """ - hparams:{ key: 'initial_temp' value: { number_value: 300 } }, - hparams:{ key: 'final_temp' value: { number_value: 120 } }, - hparams:{ - key: 'string_hparam' value: { string_value: 'a string_3' } - }, - hparams:{ key: 'bool_hparam' value: { bool_value: true } } - hparams:{ - key: 'optional_string_hparam' value { string_value: 'BB' } - }, - group_name: 'group_3' - start_time_secs: 314159 - """, + hparams:{ key: 'initial_temp' value: { number_value: 300 } }, + hparams:{ key: 'final_temp' value: { number_value: 120 } }, + hparams:{ + key: 'string_hparam' value: { string_value: 'a string_3' } + }, + hparams:{ key: 'bool_hparam' value: { bool_value: true } } + hparams:{ + key: 'optional_string_hparam' value { string_value: 'BB' } + }, + group_name: 'group_3' + start_time_secs: 314159 + """, ), metadata.SESSION_END_INFO_TAG: self._serialized_plugin_data( DATA_TYPE_SESSION_END_INFO, """ - status: STATUS_UNKNOWN - end_time_secs: 314164 - """, + status: STATUS_UNKNOWN + end_time_secs: 314164 + """, ), }, "session_5": { metadata.SESSION_START_INFO_TAG: self._serialized_plugin_data( DATA_TYPE_SESSION_START_INFO, """ - hparams:{ key: 'initial_temp' value: { number_value: 280 } }, - hparams:{ key: 'final_temp' value: { number_value: 100 } }, - hparams:{ - key: 'string_hparam' value: { string_value: 'AAAAA' } - }, - hparams:{ key: 'bool_hparam' value: { bool_value: false } } - group_name: 'group_2' - start_time_secs: 314159 - """, + hparams:{ key: 'initial_temp' value: { number_value: 280 } }, + hparams:{ key: 'final_temp' value: { number_value: 100 } }, + hparams:{ + key: 'string_hparam' value: { string_value: 'AAAAA' } + }, + hparams:{ key: 'bool_hparam' value: { bool_value: false } } + group_name: 'group_2' + start_time_secs: 314159 + """, ), metadata.SESSION_END_INFO_TAG: self._serialized_plugin_data( DATA_TYPE_SESSION_END_INFO, """ - status: STATUS_SUCCESS - end_time_secs: 314164 - """, + status: STATUS_SUCCESS + end_time_secs: 314164 + """, ), }, } @@ -331,191 +331,193 @@ def test_empty_request(self): def test_no_filter_no_sort(self): 
request = """ - start_index: 0 - slice_size: 3 - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - aggregation_type: AGGREGATION_AVG - """ + start_index: 0 + slice_size: 3 + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + aggregation_type: AGGREGATION_AVG + """ response = self._run_handler(request) self.assertProtoEquals( """ - session_groups { - name: "group_1" - hparams { key: "bool_hparam" value { bool_value: true } } - hparams { key: "final_temp" value { number_value: 150.0 } } - hparams { key: "initial_temp" value { number_value: 270.0 } } - hparams { key: "string_hparam" value { string_value: "a string" } } - metric_values { - name { tag: "current_temp" } - value: 10 - training_step: 1 - wall_time_secs: 1.0 - } - metric_values { name { tag: "delta_temp" } value: 15 - training_step: 2 - wall_time_secs: 10.0 - } - metric_values { name { tag: "optional_metric" } value: 33 - training_step: 20 - wall_time_secs: 2.0 - } - sessions { - name: "session_1" - start_time_secs: 314159 - end_time_secs: 314164 - status: STATUS_SUCCESS - metric_values { - name { tag: "current_temp" } - value: 10 - training_step: 1 - wall_time_secs: 1.0 - } - metric_values { - name { tag: "delta_temp" } - value: 15 - training_step: 2 - wall_time_secs: 10.0 - } + session_groups { + name: "group_1" + hparams { key: "bool_hparam" value { bool_value: true } } + hparams { key: "final_temp" value { number_value: 150.0 } } + hparams { key: "initial_temp" value { number_value: 270.0 } } + hparams { key: "string_hparam" value { string_value: "a string" } } + metric_values { + name { tag: "current_temp" } + value: 10 + training_step: 1 + wall_time_secs: 1.0 + } + metric_values { name { tag: "delta_temp" } value: 15 + training_step: 2 + wall_time_secs: 10.0 + } + metric_values { name { tag: "optional_metric" } value: 33 + training_step: 20 + wall_time_secs: 2.0 + } + sessions { + name: "session_1" + start_time_secs: 314159 + end_time_secs: 314164 + status: STATUS_SUCCESS + metric_values { + name { tag: "current_temp" } + value: 10 + training_step: 1 + wall_time_secs: 1.0 + } + metric_values { + name { tag: "delta_temp" } + value: 15 + training_step: 2 + wall_time_secs: 10.0 + } - metric_values { - name { tag: "optional_metric" } - value: 33 - training_step: 20 - wall_time_secs: 2.0 - } - } - } - session_groups { - name: "group_2" - hparams { key: "bool_hparam" value { bool_value: false } } - hparams { key: "final_temp" value { number_value: 100.0 } } - hparams { key: "initial_temp" value { number_value: 280.0 } } - hparams { key: "string_hparam" value { string_value: "AAAAA"}} - metric_values { - name { tag: "current_temp" } - value: 51.0 - training_step: 1 - wall_time_secs: 1.0 - } - metric_values { - name { tag: "delta_temp" } - value: 44.5 - training_step: 2 - wall_time_secs: 10.3333333 - } - sessions { - name: "session_2" - start_time_secs: 314159 - end_time_secs: 314164 - status: STATUS_SUCCESS - metric_values { - name { tag: "current_temp" } - value: 100 - training_step: 1 - wall_time_secs: 1.0 - } - metric_values { name { tag: "delta_temp" } - value: 150 - training_step: 3 - wall_time_secs: 11.0 - } - } - sessions { - name: "session_3" - start_time_secs: 314159 - end_time_secs: 314164 - status: STATUS_FAILURE - metric_values { - name { tag: "current_temp" } - value: 1.0 - training_step: 1 - wall_time_secs: 1.0 - } - metric_values { name { tag: "delta_temp" } - value: 1.5 - training_step: 2 - wall_time_secs: 10.0 + metric_values { + 
name { tag: "optional_metric" } + value: 33 + training_step: 20 + wall_time_secs: 2.0 + } + } } - } - sessions { - name: "session_5" - start_time_secs: 314159 - end_time_secs: 314164 - status: STATUS_SUCCESS - metric_values { - name { tag: "current_temp" } - value: 52.0 - training_step: 1 - wall_time_secs: 1.0 - } - metric_values { name { tag: "delta_temp" } - value: -18 - training_step: 2 - wall_time_secs: 10.0 - } - } - } - session_groups { - name: "group_3" - hparams { key: "bool_hparam" value { bool_value: true } } - hparams { key: "final_temp" value { number_value: 120.0 } } - hparams { key: "initial_temp" value { number_value: 300.0 } } - hparams { key: "string_hparam" value { string_value: "a string_3"}} - hparams { - key: 'optional_string_hparam' value { string_value: 'BB' } - } - metric_values { - name { tag: "current_temp" } - value: 101.0 - training_step: 1 - wall_time_secs: 1.0 - } - metric_values { name { tag: "delta_temp" } value: -151.0 - training_step: 2 - wall_time_secs: 10.0 - } - sessions { - name: "session_4" - start_time_secs: 314159 - end_time_secs: 314164 - status: STATUS_UNKNOWN - metric_values { - name { tag: "current_temp" } - value: 101.0 - training_step: 1 - wall_time_secs: 1.0 + session_groups { + name: "group_2" + hparams { key: "bool_hparam" value { bool_value: false } } + hparams { key: "final_temp" value { number_value: 100.0 } } + hparams { key: "initial_temp" value { number_value: 280.0 } } + hparams { key: "string_hparam" value { string_value: "AAAAA"}} + metric_values { + name { tag: "current_temp" } + value: 51.0 + training_step: 1 + wall_time_secs: 1.0 + } + metric_values { + name { tag: "delta_temp" } + value: 44.5 + training_step: 2 + wall_time_secs: 10.3333333 + } + sessions { + name: "session_2" + start_time_secs: 314159 + end_time_secs: 314164 + status: STATUS_SUCCESS + metric_values { + name { tag: "current_temp" } + value: 100 + training_step: 1 + wall_time_secs: 1.0 + } + metric_values { name { tag: "delta_temp" } + value: 150 + training_step: 3 + wall_time_secs: 11.0 + } + } + sessions { + name: "session_3" + start_time_secs: 314159 + end_time_secs: 314164 + status: STATUS_FAILURE + metric_values { + name { tag: "current_temp" } + value: 1.0 + training_step: 1 + wall_time_secs: 1.0 + } + metric_values { name { tag: "delta_temp" } + value: 1.5 + training_step: 2 + wall_time_secs: 10.0 + } + } + sessions { + name: "session_5" + start_time_secs: 314159 + end_time_secs: 314164 + status: STATUS_SUCCESS + metric_values { + name { tag: "current_temp" } + value: 52.0 + training_step: 1 + wall_time_secs: 1.0 + } + metric_values { name { tag: "delta_temp" } + value: -18 + training_step: 2 + wall_time_secs: 10.0 + } + } } - metric_values { name { tag: "delta_temp" } value: -151.0 - training_step: 2 - wall_time_secs: 10.0 + session_groups { + name: "group_3" + hparams { key: "bool_hparam" value { bool_value: true } } + hparams { key: "final_temp" value { number_value: 120.0 } } + hparams { key: "initial_temp" value { number_value: 300.0 } } + hparams { key: "string_hparam" value { string_value: "a string_3"}} + hparams { + key: 'optional_string_hparam' value { string_value: 'BB' } + } + metric_values { + name { tag: "current_temp" } + value: 101.0 + training_step: 1 + wall_time_secs: 1.0 + } + metric_values { name { tag: "delta_temp" } value: -151.0 + training_step: 2 + wall_time_secs: 10.0 + } + sessions { + name: "session_4" + start_time_secs: 314159 + end_time_secs: 314164 + status: STATUS_UNKNOWN + metric_values { + name { tag: "current_temp" } + 
value: 101.0 + training_step: 1 + wall_time_secs: 1.0 + } + metric_values { name { tag: "delta_temp" } value: -151.0 + training_step: 2 + wall_time_secs: 10.0 + } + } } - } - } - total_size: 3 - """, + total_size: 3 + """, response, ) def test_no_allowed_statuses(self): request = """ - start_index: 0 - slice_size: 3 - allowed_statuses: [] - aggregation_type: AGGREGATION_AVG - """ + start_index: 0 + slice_size: 3 + allowed_statuses: [] + aggregation_type: AGGREGATION_AVG + """ response = self._run_handler(request) self.assertEquals(len(response.session_groups), 0) def test_some_allowed_statuses(self): request = """ - start_index: 0 - slice_size: 3 - allowed_statuses: [STATUS_UNKNOWN, STATUS_SUCCESS] - aggregation_type: AGGREGATION_AVG - """ + start_index: 0 + slice_size: 3 + allowed_statuses: [STATUS_UNKNOWN, STATUS_SUCCESS] + aggregation_type: AGGREGATION_AVG + """ response = self._run_handler(request) self.assertEquals( _reduce_to_names(response.session_groups), @@ -528,11 +530,11 @@ def test_some_allowed_statuses(self): def test_some_allowed_statuses_empty_groups(self): request = """ - start_index: 0 - slice_size: 3 - allowed_statuses: [STATUS_FAILURE] - aggregation_type: AGGREGATION_AVG - """ + start_index: 0 + slice_size: 3 + allowed_statuses: [STATUS_FAILURE] + aggregation_type: AGGREGATION_AVG + """ response = self._run_handler(request) self.assertEquals( _reduce_to_names(response.session_groups), @@ -541,182 +543,220 @@ def test_some_allowed_statuses_empty_groups(self): def test_aggregation_median_current_temp(self): request = """ - start_index: 0 - slice_size: 3 - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - aggregation_type: AGGREGATION_MEDIAN - aggregation_metric: { tag: "current_temp" } - """ + start_index: 0 + slice_size: 3 + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + aggregation_type: AGGREGATION_MEDIAN + aggregation_metric: { tag: "current_temp" } + """ response = self._run_handler(request) self.assertEquals(len(response.session_groups[1].metric_values), 2) self.assertProtoEquals( - """name { tag: "current_temp" } - value: 52.0 - training_step: 1 - wall_time_secs: 1.0""", + """ + name { tag: "current_temp" } + value: 52.0 + training_step: 1 + wall_time_secs: 1.0 + """, response.session_groups[1].metric_values[0], ) self.assertProtoEquals( - """name { tag: "delta_temp" } - value: -18.0 - training_step: 2 - wall_time_secs: 10.0""", + """ + name { tag: "delta_temp" } + value: -18.0 + training_step: 2 + wall_time_secs: 10.0 + """, response.session_groups[1].metric_values[1], ) def test_aggregation_median_delta_temp(self): request = """ - start_index: 0 - slice_size: 3 - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - aggregation_type: AGGREGATION_MEDIAN - aggregation_metric: { tag: "delta_temp" } - """ + start_index: 0 + slice_size: 3 + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + aggregation_type: AGGREGATION_MEDIAN + aggregation_metric: { tag: "delta_temp" } + """ response = self._run_handler(request) self.assertEquals(len(response.session_groups[1].metric_values), 2) self.assertProtoEquals( - """name { tag: "current_temp" } - value: 1.0 - training_step: 1 - wall_time_secs: 1.0""", + """ + name { tag: "current_temp" } + value: 1.0 + training_step: 1 + wall_time_secs: 1.0 + """, response.session_groups[1].metric_values[0], ) self.assertProtoEquals( - """name { tag: "delta_temp" } - 
value: 1.5 - training_step: 2 - wall_time_secs: 10.0""", + """ + name { tag: "delta_temp" } + value: 1.5 + training_step: 2 + wall_time_secs: 10.0 + """, response.session_groups[1].metric_values[1], ) def test_aggregation_max_current_temp(self): request = """ - start_index: 0 - slice_size: 3 - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - aggregation_type: AGGREGATION_MAX - aggregation_metric: { tag: "current_temp" } - """ + start_index: 0 + slice_size: 3 + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + aggregation_type: AGGREGATION_MAX + aggregation_metric: { tag: "current_temp" } + """ response = self._run_handler(request) self.assertEquals(len(response.session_groups[1].metric_values), 2) self.assertProtoEquals( - """name { tag: "current_temp" } - value: 100 - training_step: 1 - wall_time_secs: 1.0""", + """ + name { tag: "current_temp" } + value: 100 + training_step: 1 + wall_time_secs: 1.0 + """, response.session_groups[1].metric_values[0], ) self.assertProtoEquals( - """name { tag: "delta_temp" } - value: 150.0 - training_step: 3 - wall_time_secs: 11.0""", + """ + name { tag: "delta_temp" } + value: 150.0 + training_step: 3 + wall_time_secs: 11.0 + """, response.session_groups[1].metric_values[1], ) def test_aggregation_max_delta_temp(self): request = """ - start_index: 0 - slice_size: 3 - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - aggregation_type: AGGREGATION_MAX - aggregation_metric: { tag: "delta_temp" } - """ + start_index: 0 + slice_size: 3 + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + aggregation_type: AGGREGATION_MAX + aggregation_metric: { tag: "delta_temp" } + """ response = self._run_handler(request) self.assertEquals(len(response.session_groups[1].metric_values), 2) self.assertProtoEquals( - """name { tag: "current_temp" } - value: 100.0 - training_step: 1 - wall_time_secs: 1.0""", + """ + name { tag: "current_temp" } + value: 100.0 + training_step: 1 + wall_time_secs: 1.0 + """, response.session_groups[1].metric_values[0], ) self.assertProtoEquals( - """name { tag: "delta_temp" } - value: 150.0 - training_step: 3 - wall_time_secs: 11.0""", + """ + name { tag: "delta_temp" } + value: 150.0 + training_step: 3 + wall_time_secs: 11.0 + """, response.session_groups[1].metric_values[1], ) def test_aggregation_min_current_temp(self): request = """ - start_index: 0 - slice_size: 3 - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - aggregation_type: AGGREGATION_MIN - aggregation_metric: { tag: "current_temp" } - """ + start_index: 0 + slice_size: 3 + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + aggregation_type: AGGREGATION_MIN + aggregation_metric: { tag: "current_temp" } + """ response = self._run_handler(request) self.assertEquals(len(response.session_groups[1].metric_values), 2) self.assertProtoEquals( - """name { tag: "current_temp" } - value: 1.0 - training_step: 1 - wall_time_secs: 1.0""", + """ + name { tag: "current_temp" } + value: 1.0 + training_step: 1 + wall_time_secs: 1.0 + """, response.session_groups[1].metric_values[0], ) self.assertProtoEquals( - """name { tag: "delta_temp" } - value: 1.5 - training_step: 2 - wall_time_secs: 10.0""", + """ + name { tag: "delta_temp" } + value: 1.5 + training_step: 2 + wall_time_secs: 10.0 + """, response.session_groups[1].metric_values[1], ) 
def test_aggregation_min_delta_temp(self): request = """ - start_index: 0 - slice_size: 3 - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - aggregation_type: AGGREGATION_MIN - aggregation_metric: { tag: "delta_temp" } - """ + start_index: 0 + slice_size: 3 + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + aggregation_type: AGGREGATION_MIN + aggregation_metric: { tag: "delta_temp" } + """ response = self._run_handler(request) self.assertEquals(len(response.session_groups[1].metric_values), 2) self.assertProtoEquals( - """name { tag: "current_temp" } - value: 52.0 - training_step: 1 - wall_time_secs: 1.0""", + """ + name { tag: "current_temp" } + value: 52.0 + training_step: 1 + wall_time_secs: 1.0 + """, response.session_groups[1].metric_values[0], ) self.assertProtoEquals( - """name { tag: "delta_temp" } - value: -18.0 - training_step: 2 - wall_time_secs: 10.0""", + """ + name { tag: "delta_temp" } + value: -18.0 + training_step: 2 + wall_time_secs: 10.0 + """, response.session_groups[1].metric_values[1], ) def test_no_filter_no_sort_partial_slice(self): self._verify_handler( request=""" - start_index: 1 - slice_size: 1 - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - """, + start_index: 1 + slice_size: 1 + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + """, expected_session_group_names=["group_2"], expected_total_size=3, ) @@ -724,17 +764,19 @@ def test_no_filter_no_sort_partial_slice(self): def test_no_filter_exclude_missing_values(self): self._verify_handler( request=""" - col_params: { - metric: { tag: 'optional_metric' } - exclude_missing_values: true - } - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - start_index: 0 - slice_size: 3 - """, + col_params: { + metric: { tag: 'optional_metric' } + exclude_missing_values: true + } + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + start_index: 0 + slice_size: 3 + """, expected_session_group_names=["group_1"], expected_total_size=1, ) @@ -742,34 +784,38 @@ def test_no_filter_exclude_missing_values(self): def test_filter_regexp(self): self._verify_handler( request=""" - col_params: { - hparam: 'string_hparam' - filter_regexp: 'AA' - } - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - start_index: 0 - slice_size: 3 - """, + col_params: { + hparam: 'string_hparam' + filter_regexp: 'AA' + } + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + start_index: 0 + slice_size: 3 + """, expected_session_group_names=["group_2"], expected_total_size=1, ) # Test filtering out all session groups. 
self._verify_handler( request=""" - col_params: { - hparam: 'string_hparam' - filter_regexp: 'a string_100' - } - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - start_index: 0 - slice_size: 3 - """, + col_params: { + hparam: 'string_hparam' + filter_regexp: 'a string_100' + } + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + start_index: 0 + slice_size: 3 + """, expected_session_group_names=[], expected_total_size=0, ) @@ -777,17 +823,19 @@ def test_filter_regexp(self): def test_filter_interval(self): self._verify_handler( request=""" - col_params: { - hparam: 'initial_temp' - filter_interval: { min_value: 270 max_value: 282 } - } - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - start_index: 0 - slice_size: 3 - """, + col_params: { + hparam: 'initial_temp' + filter_interval: { min_value: 270 max_value: 282 } + } + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + start_index: 0 + slice_size: 3 + """, expected_session_group_names=["group_1", "group_2"], expected_total_size=2, ) @@ -795,18 +843,20 @@ def test_filter_interval(self): def test_filter_discrete_set(self): self._verify_handler( request=""" - col_params: { - metric: { tag: 'current_temp' } - filter_discrete: { values: [{ number_value: 101.0 }, - { number_value: 10.0 }] } - } - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - start_index: 0 - slice_size: 3 - """, + col_params: { + metric: { tag: 'current_temp' } + filter_discrete: { values: [{ number_value: 101.0 }, + { number_value: 10.0 }] } + } + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + start_index: 0 + slice_size: 3 + """, expected_session_group_names=["group_1", "group_3"], expected_total_size=2, ) @@ -814,22 +864,24 @@ def test_filter_discrete_set(self): def test_filter_multiple_columns(self): self._verify_handler( request=""" - col_params: { - metric: { tag: 'current_temp' } - filter_discrete: { values: [{ number_value: 101.0 }, - { number_value: 10.0 }] } - } - col_params: { - hparam: 'initial_temp' - filter_interval: { min_value: 270 max_value: 282 } - } - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - start_index: 0 - slice_size: 3 - """, + col_params: { + metric: { tag: 'current_temp' } + filter_discrete: { values: [{ number_value: 101.0 }, + { number_value: 10.0 }] } + } + col_params: { + hparam: 'initial_temp' + filter_interval: { min_value: 270 max_value: 282 } + } + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + start_index: 0 + slice_size: 3 + """, expected_session_group_names=["group_1"], expected_total_size=1, ) @@ -837,53 +889,59 @@ def test_filter_multiple_columns(self): def test_filter_single_column_with_missing_values(self): self._verify_handler( request=""" - col_params: { - hparam: 'optional_string_hparam' - filter_regexp: 'B' - exclude_missing_values: true - } - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - start_index: 0 - slice_size: 3 - """, + col_params: { + hparam: 'optional_string_hparam' + filter_regexp: 'B' + exclude_missing_values: true + } + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + start_index: 0 + slice_size: 3 + """, expected_session_group_names=["group_3"], 
expected_total_size=1, ) self._verify_handler( request=""" - col_params: { - hparam: 'optional_string_hparam' - filter_regexp: 'B' - exclude_missing_values: false - } - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - start_index: 0 - slice_size: 3 - """, + col_params: { + hparam: 'optional_string_hparam' + filter_regexp: 'B' + exclude_missing_values: false + } + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + start_index: 0 + slice_size: 3 + """, expected_session_group_names=["group_1", "group_2", "group_3"], expected_total_size=3, ) self._verify_handler( request=""" - col_params: { - metric: { tag: 'optional_metric' } - filter_discrete: { values: { number_value: 33.0 } } - exclude_missing_values: true - } - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - start_index: 0 - slice_size: 3 - """, + col_params: { + metric: { tag: 'optional_metric' } + filter_discrete: { values: { number_value: 33.0 } } + exclude_missing_values: true + } + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + start_index: 0 + slice_size: 3 + """, expected_session_group_names=["group_1"], expected_total_size=1, ) @@ -891,50 +949,56 @@ def test_filter_single_column_with_missing_values(self): def test_sort_one_column(self): self._verify_handler( request=""" - col_params: { - metric: { tag: 'delta_temp' } - order: ORDER_ASC - } - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - start_index: 0 - slice_size: 3 - """, + col_params: { + metric: { tag: 'delta_temp' } + order: ORDER_ASC + } + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + start_index: 0 + slice_size: 3 + """, expected_session_group_names=["group_3", "group_1", "group_2"], expected_total_size=3, ) self._verify_handler( request=""" - col_params: { - hparam: 'string_hparam' - order: ORDER_ASC - } - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - start_index: 0 - slice_size: 3 - """, + col_params: { + hparam: 'string_hparam' + order: ORDER_ASC + } + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + start_index: 0 + slice_size: 3 + """, expected_session_group_names=["group_2", "group_1", "group_3"], expected_total_size=3, ) # Test descending order. 
self._verify_handler( request=""" - col_params: { - hparam: 'string_hparam' - order: ORDER_DESC - } - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - start_index: 0 - slice_size: 3 - """, + col_params: { + hparam: 'string_hparam' + order: ORDER_DESC + } + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + start_index: 0 + slice_size: 3 + """, expected_session_group_names=["group_3", "group_1", "group_2"], expected_total_size=3, ) @@ -942,42 +1006,46 @@ def test_sort_one_column(self): def test_sort_multiple_columns(self): self._verify_handler( request=""" - col_params: { - hparam: 'bool_hparam' - order: ORDER_ASC - } - col_params: { - metric: { tag: 'delta_temp' } - order: ORDER_ASC - } - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - start_index: 0 - slice_size: 3 - """, + col_params: { + hparam: 'bool_hparam' + order: ORDER_ASC + } + col_params: { + metric: { tag: 'delta_temp' } + order: ORDER_ASC + } + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + start_index: 0 + slice_size: 3 + """, expected_session_group_names=["group_2", "group_3", "group_1"], expected_total_size=3, ) # Primary key in descending order. Secondary key in ascending order. self._verify_handler( request=""" - col_params: { - hparam: 'bool_hparam' - order: ORDER_DESC - } - col_params: { - metric: { tag: 'delta_temp' } - order: ORDER_ASC - } - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - start_index: 0 - slice_size: 3 - """, + col_params: { + hparam: 'bool_hparam' + order: ORDER_DESC + } + col_params: { + metric: { tag: 'delta_temp' } + order: ORDER_ASC + } + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + start_index: 0 + slice_size: 3 + """, expected_session_group_names=["group_3", "group_1", "group_2"], expected_total_size=3, ) @@ -985,69 +1053,77 @@ def test_sort_multiple_columns(self): def test_sort_one_column_with_missing_values(self): self._verify_handler( request=""" - col_params: { - metric: { tag: 'optional_metric' } - order: ORDER_ASC - missing_values_first: false - } - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - start_index: 0 - slice_size: 3 - """, + col_params: { + metric: { tag: 'optional_metric' } + order: ORDER_ASC + missing_values_first: false + } + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + start_index: 0 + slice_size: 3 + """, expected_session_group_names=["group_1", "group_2", "group_3"], expected_total_size=3, ) self._verify_handler( request=""" - col_params: { - metric: { tag: 'optional_metric' } - order: ORDER_ASC - missing_values_first: true - } - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - start_index: 0 - slice_size: 3 - """, + col_params: { + metric: { tag: 'optional_metric' } + order: ORDER_ASC + missing_values_first: true + } + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + start_index: 0 + slice_size: 3 + """, expected_session_group_names=["group_2", "group_3", "group_1"], expected_total_size=3, ) self._verify_handler( request=""" - col_params: { - hparam: 'optional_string_hparam' - order: ORDER_ASC - missing_values_first: false - } - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - 
start_index: 0 - slice_size: 3 - """, + col_params: { + hparam: 'optional_string_hparam' + order: ORDER_ASC + missing_values_first: false + } + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + start_index: 0 + slice_size: 3 + """, expected_session_group_names=["group_3", "group_1", "group_2"], expected_total_size=3, ) self._verify_handler( request=""" - col_params: { - hparam: 'optional_string_hparam' - order: ORDER_ASC - missing_values_first: true - } - allowed_statuses: [STATUS_UNKNOWN, - STATUS_SUCCESS, - STATUS_FAILURE, - STATUS_RUNNING] - start_index: 0 - slice_size: 3 - """, + col_params: { + hparam: 'optional_string_hparam' + order: ORDER_ASC + missing_values_first: true + } + allowed_statuses: [ + STATUS_UNKNOWN, + STATUS_SUCCESS, + STATUS_FAILURE, + STATUS_RUNNING + ] + start_index: 0 + slice_size: 3 + """, expected_session_group_names=["group_1", "group_2", "group_3"], expected_total_size=3, ) diff --git a/tensorboard/plugins/hparams/summary_v2.py b/tensorboard/plugins/hparams/summary_v2.py index 3f85ac57b2..19a4421e0d 100644 --- a/tensorboard/plugins/hparams/summary_v2.py +++ b/tensorboard/plugins/hparams/summary_v2.py @@ -538,21 +538,22 @@ def __init__( dataset_type=None, ): """ - Args: - tag: The tag name of the scalar summary that corresponds to this - metric (as a `str`). - group: An optional string listing the subdirectory under the - session's log directory containing summaries for this metric. - For instance, if summaries for training runs are written to - events files in `ROOT_LOGDIR/SESSION_ID/train`, then `group` - should be `"train"`. Defaults to the empty string: i.e., - summaries are expected to be written to the session logdir. - display_name: An optional human-readable display name. - description: An optional Markdown string with a human-readable - description of this metric, to appear in TensorBoard. - dataset_type: Either `Metric.TRAINING` or `Metric.VALIDATION`, or - `None`. - """ + + Args: + tag: The tag name of the scalar summary that corresponds to this + metric (as a `str`). + group: An optional string listing the subdirectory under the + session's log directory containing summaries for this metric. + For instance, if summaries for training runs are written to + events files in `ROOT_LOGDIR/SESSION_ID/train`, then `group` + should be `"train"`. Defaults to the empty string: i.e., + summaries are expected to be written to the session logdir. + display_name: An optional human-readable display name. + description: An optional Markdown string with a human-readable + description of this metric, to appear in TensorBoard. + dataset_type: Either `Metric.TRAINING` or `Metric.VALIDATION`, or + `None`. + """ self._tag = tag self._group = group self._display_name = display_name diff --git a/tensorboard/plugins/hparams/summary_v2_test.py b/tensorboard/plugins/hparams/summary_v2_test.py index a0e10a9a11..0eafde2611 100644 --- a/tensorboard/plugins/hparams/summary_v2_test.py +++ b/tensorboard/plugins/hparams/summary_v2_test.py @@ -87,13 +87,13 @@ def setUp(self): self.expected_session_start_pb = plugin_data_pb2.SessionStartInfo() text_format.Merge( """ - hparams { key: "learning_rate" value { number_value: 0.02 } } - hparams { key: "dense_layers" value { number_value: 5 } } - hparams { key: "optimizer" value { string_value: "adam" } } - hparams { key: "who_knows_what" value { string_value: "???" 
} } - hparams { key: "magic" value { bool_value: true } } - hparams { key: "dropout" value { number_value: 0.3 } } - """, + hparams { key: "learning_rate" value { number_value: 0.02 } } + hparams { key: "dense_layers" value { number_value: 5 } } + hparams { key: "optimizer" value { string_value: "adam" } } + hparams { key: "who_knows_what" value { string_value: "???" } } + hparams { key: "magic" value { bool_value: true } } + hparams { key: "dropout" value { number_value: 0.3 } } + """, self.expected_session_start_pb, ) self.expected_session_start_pb.group_name = self.trial_id @@ -306,74 +306,74 @@ def setUp(self): self.expected_experiment_pb = api_pb2.Experiment() text_format.Merge( """ - time_created_secs: 1555624767.0 - hparam_infos { - name: "learning_rate" - type: DATA_TYPE_FLOAT64 - domain_interval { - min_value: 0.01 - max_value: 0.1 - } - } - hparam_infos { - name: "dense_layers" - type: DATA_TYPE_FLOAT64 - domain_interval { - min_value: 2 - max_value: 7 - } - } - hparam_infos { - name: "optimizer" - type: DATA_TYPE_STRING - domain_discrete { - values { - string_value: "adam" + time_created_secs: 1555624767.0 + hparam_infos { + name: "learning_rate" + type: DATA_TYPE_FLOAT64 + domain_interval { + min_value: 0.01 + max_value: 0.1 + } } - values { - string_value: "sgd" + hparam_infos { + name: "dense_layers" + type: DATA_TYPE_FLOAT64 + domain_interval { + min_value: 2 + max_value: 7 + } } - } - } - hparam_infos { - name: "who_knows_what" - } - hparam_infos { - name: "magic" - type: DATA_TYPE_BOOL - display_name: "~*~ Magic ~*~" - description: "descriptive" - domain_discrete { - values { - bool_value: false + hparam_infos { + name: "optimizer" + type: DATA_TYPE_STRING + domain_discrete { + values { + string_value: "adam" + } + values { + string_value: "sgd" + } + } } - values { - bool_value: true + hparam_infos { + name: "who_knows_what" } - } - } - metric_infos { - name { - tag: "samples_per_second" - } - } - metric_infos { - name { - group: "train" - tag: "batch_loss" - } - display_name: "loss (train)" - } - metric_infos { - name { - group: "validation" - tag: "epoch_accuracy" - } - display_name: "accuracy (val.)" - description: "Accuracy on the _validation_ dataset." - dataset_type: DATASET_VALIDATION - } - """, + hparam_infos { + name: "magic" + type: DATA_TYPE_BOOL + display_name: "~*~ Magic ~*~" + description: "descriptive" + domain_discrete { + values { + bool_value: false + } + values { + bool_value: true + } + } + } + metric_infos { + name { + tag: "samples_per_second" + } + } + metric_infos { + name { + group: "train" + tag: "batch_loss" + } + display_name: "loss (train)" + } + metric_infos { + name { + group: "validation" + tag: "epoch_accuracy" + } + display_name: "accuracy (val.)" + description: "Accuracy on the _validation_ dataset." + dataset_type: DATASET_VALIDATION + } + """, self.expected_experiment_pb, ) diff --git a/tensorboard/plugins/image/images_plugin.py b/tensorboard/plugins/image/images_plugin.py index a07b83d37e..a5b2901814 100644 --- a/tensorboard/plugins/image/images_plugin.py +++ b/tensorboard/plugins/image/images_plugin.py @@ -83,11 +83,11 @@ def is_active(self): db = self._db_connection_provider() cursor = db.execute( """ - SELECT 1 - FROM Tags - WHERE Tags.plugin_name = ? - LIMIT 1 - """, + SELECT 1 + FROM Tags + WHERE Tags.plugin_name = ? 
+ LIMIT 1 + """, (metadata.PLUGIN_NAME,), ) return bool(list(cursor)) @@ -105,25 +105,25 @@ def _index_impl(self): db = self._db_connection_provider() cursor = db.execute( """ - SELECT - Runs.run_name, - Tags.tag_name, - Tags.display_name, - Descriptions.description, - /* Subtract 2 for leading width and height elements. */ - MAX(CAST (Tensors.shape AS INT)) - 2 AS samples - FROM Tags - JOIN Runs USING (run_id) - JOIN Tensors ON Tags.tag_id = Tensors.series - LEFT JOIN Descriptions ON Tags.tag_id = Descriptions.id - WHERE Tags.plugin_name = :plugin - /* Shape should correspond to a rank-1 tensor. */ - AND NOT INSTR(Tensors.shape, ',') - /* Required to use TensorSeriesStepIndex. */ - AND Tensors.step IS NOT NULL - GROUP BY Tags.tag_id - HAVING samples >= 1 - """, + SELECT + Runs.run_name, + Tags.tag_name, + Tags.display_name, + Descriptions.description, + /* Subtract 2 for leading width and height elements. */ + MAX(CAST (Tensors.shape AS INT)) - 2 AS samples + FROM Tags + JOIN Runs USING (run_id) + JOIN Tensors ON Tags.tag_id = Tensors.series + LEFT JOIN Descriptions ON Tags.tag_id = Descriptions.id + WHERE Tags.plugin_name = :plugin + /* Shape should correspond to a rank-1 tensor. */ + AND NOT INSTR(Tensors.shape, ',') + /* Required to use TensorSeriesStepIndex. */ + AND Tensors.step IS NOT NULL + GROUP BY Tags.tag_id + HAVING samples >= 1 + """, {"plugin": metadata.PLUGIN_NAME}, ) result = collections.defaultdict(dict) @@ -209,30 +209,30 @@ def _image_response_for_run(self, run, tag, sample): db = self._db_connection_provider() cursor = db.execute( """ - SELECT - computed_time, - step, - CAST (T0.data AS INT) AS width, - CAST (T1.data AS INT) AS height - FROM Tensors - JOIN TensorStrings AS T0 - ON Tensors.rowid = T0.tensor_rowid - JOIN TensorStrings AS T1 - ON Tensors.rowid = T1.tensor_rowid - WHERE - series = ( - SELECT tag_id - FROM Runs - CROSS JOIN Tags USING (run_id) - WHERE Runs.run_name = :run AND Tags.tag_name = :tag) - AND step IS NOT NULL - AND dtype = :dtype - /* Should be n-vector, n >= 3: [width, height, samples...] */ - AND (NOT INSTR(shape, ',') AND CAST (shape AS INT) >= 3) - AND T0.idx = 0 - AND T1.idx = 1 - ORDER BY step - """, + SELECT + computed_time, + step, + CAST (T0.data AS INT) AS width, + CAST (T1.data AS INT) AS height + FROM Tensors + JOIN TensorStrings AS T0 + ON Tensors.rowid = T0.tensor_rowid + JOIN TensorStrings AS T1 + ON Tensors.rowid = T1.tensor_rowid + WHERE + series = ( + SELECT tag_id + FROM Runs + CROSS JOIN Tags USING (run_id) + WHERE Runs.run_name = :run AND Tags.tag_name = :tag) + AND step IS NOT NULL + AND dtype = :dtype + /* Should be n-vector, n >= 3: [width, height, samples...] */ + AND (NOT INSTR(shape, ',') AND CAST (shape AS INT) >= 3) + AND T0.idx = 0 + AND T1.idx = 1 + ORDER BY step + """, {"run": run, "tag": tag, "dtype": tf.string.as_datatype_enum}, ) return [ @@ -320,30 +320,30 @@ def _get_individual_image(self, run, tag, index, sample): db = self._db_connection_provider() cursor = db.execute( """ - SELECT data - FROM TensorStrings - WHERE - /* Skip first 2 elements which are width and height. */ - idx = 2 + :sample - AND tensor_rowid = ( - SELECT rowid - FROM Tensors - WHERE - series = ( - SELECT tag_id - FROM Runs - CROSS JOIN Tags USING (run_id) - WHERE - Runs.run_name = :run - AND Tags.tag_name = :tag) - AND step IS NOT NULL - AND dtype = :dtype - /* Should be n-vector, n >= 3: [width, height, samples...] 
*/ - AND (NOT INSTR(shape, ',') AND CAST (shape AS INT) >= 3) - ORDER BY step - LIMIT 1 - OFFSET :index) - """, + SELECT data + FROM TensorStrings + WHERE + /* Skip first 2 elements which are width and height. */ + idx = 2 + :sample + AND tensor_rowid = ( + SELECT rowid + FROM Tensors + WHERE + series = ( + SELECT tag_id + FROM Runs + CROSS JOIN Tags USING (run_id) + WHERE + Runs.run_name = :run + AND Tags.tag_name = :tag) + AND step IS NOT NULL + AND dtype = :dtype + /* Should be n-vector, n >= 3: [width, height, samples...] */ + AND (NOT INSTR(shape, ',') AND CAST (shape AS INT) >= 3) + ORDER BY step + LIMIT 1 + OFFSET :index) + """, { "run": run, "tag": tag, diff --git a/tensorboard/plugins/interactive_inference/witwidget/notebook/base.py b/tensorboard/plugins/interactive_inference/witwidget/notebook/base.py index 98a9ee0454..1f22f6ea72 100644 --- a/tensorboard/plugins/interactive_inference/witwidget/notebook/base.py +++ b/tensorboard/plugins/interactive_inference/witwidget/notebook/base.py @@ -147,7 +147,8 @@ def wrapped_compare_custom_predict_fn(examples): def _get_element_html(self): return """ - """ + + """ def set_examples(self, examples): """Sets the examples shown in WIT. diff --git a/tensorboard/plugins/interactive_inference/witwidget/notebook/colab/wit.py b/tensorboard/plugins/interactive_inference/witwidget/notebook/colab/wit.py index 8a1e0c6243..18657d8c31 100644 --- a/tensorboard/plugins/interactive_inference/witwidget/notebook/colab/wit.py +++ b/tensorboard/plugins/interactive_inference/witwidget/notebook/colab/wit.py @@ -279,7 +279,8 @@ def __init__(self, config_builder, height=1000): def _get_element_html(self): return """ - """ + -1 - ORDER BY Tensors.step - """ + SELECT + Runs.run_name, + Tensors.step, + Tensors.computed_time, + Tensors.data, + Tensors.dtype, + Tensors.shape, + Tags.plugin_data + FROM Tensors + JOIN Tags + ON Tensors.series = Tags.tag_id + JOIN Runs + ON Tags.run_id = Runs.run_id + WHERE + Runs.run_name IN (%s) + AND Tags.tag_name = ? + AND Tags.plugin_name = ? + AND Tensors.step > -1 + ORDER BY Tensors.step + """ % ",".join(["?"] * len(runs)), runs + [tag, metadata.PLUGIN_NAME], ) @@ -212,16 +212,16 @@ def tags_impl(self): db = self._db_connection_provider() cursor = db.execute( """ - SELECT - Tags.tag_name, - Tags.display_name, - Runs.run_name - FROM Tags - JOIN Runs - ON Tags.run_id = Runs.run_id - WHERE - Tags.plugin_name = ? - """, + SELECT + Tags.tag_name, + Tags.display_name, + Runs.run_name + FROM Tags + JOIN Runs + ON Tags.run_id = Runs.run_id + WHERE + Tags.plugin_name = ? + """, (metadata.PLUGIN_NAME,), ) result = {} @@ -282,26 +282,26 @@ def available_time_entries_impl(self): # For each run, pick a tag. cursor = db.execute( """ - SELECT - TagPickingTable.run_name, - Tensors.step, - Tensors.computed_time - FROM (/* For each run, pick any tag. */ - SELECT - Runs.run_id AS run_id, - Runs.run_name AS run_name, - Tags.tag_id AS tag_id - FROM Runs - JOIN Tags - ON Tags.run_id = Runs.run_id - WHERE - Tags.plugin_name = ? - GROUP BY Runs.run_id) AS TagPickingTable - JOIN Tensors - ON Tensors.series = TagPickingTable.tag_id - WHERE Tensors.step IS NOT NULL - ORDER BY Tensors.step - """, + SELECT + TagPickingTable.run_name, + Tensors.step, + Tensors.computed_time + FROM (/* For each run, pick any tag. */ + SELECT + Runs.run_id AS run_id, + Runs.run_name AS run_name, + Tags.tag_id AS tag_id + FROM Runs + JOIN Tags + ON Tags.run_id = Runs.run_id + WHERE + Tags.plugin_name = ? 
+ GROUP BY Runs.run_id) AS TagPickingTable + JOIN Tensors + ON Tensors.series = TagPickingTable.tag_id + WHERE Tensors.step IS NOT NULL + ORDER BY Tensors.step + """, (metadata.PLUGIN_NAME,), ) for (run, step, wall_time) in cursor: @@ -372,11 +372,11 @@ def is_active(self): db = self._db_connection_provider() cursor = db.execute( """ - SELECT 1 - FROM Tags - WHERE Tags.plugin_name = ? - LIMIT 1 - """, + SELECT 1 + FROM Tags + WHERE Tags.plugin_name = ? + LIMIT 1 + """, (metadata.PLUGIN_NAME,), ) return bool(list(cursor)) diff --git a/tensorboard/plugins/profile/trace_events_json_test.py b/tensorboard/plugins/profile/trace_events_json_test.py index 1157a5c3c7..7839eb897f 100644 --- a/tensorboard/plugins/profile/trace_events_json_test.py +++ b/tensorboard/plugins/profile/trace_events_json_test.py @@ -39,39 +39,39 @@ def testJsonConversion(self): self.assertEqual( self.convert( """ - devices { key: 2 value { - name: 'D2' - device_id: 2 - resources { key: 2 value { - resource_id: 2 - name: 'R2.2' - } } - } } - devices { key: 1 value { - name: 'D1' - device_id: 1 - resources { key: 2 value { - resource_id: 1 - name: 'R1.2' - } } - } } + devices { key: 2 value { + name: 'D2' + device_id: 2 + resources { key: 2 value { + resource_id: 2 + name: 'R2.2' + } } + } } + devices { key: 1 value { + name: 'D1' + device_id: 1 + resources { key: 2 value { + resource_id: 1 + name: 'R1.2' + } } + } } - trace_events { - device_id: 1 - resource_id: 2 - name: "E1.2.1" - timestamp_ps: 100000 - duration_ps: 10000 - args { key: "label" value: "E1.2.1" } - args { key: "extra" value: "extra info" } - } - trace_events { - device_id: 2 - resource_id: 2 - name: "E2.2.1" - timestamp_ps: 105000 - } - """ + trace_events { + device_id: 1 + resource_id: 2 + name: "E1.2.1" + timestamp_ps: 100000 + duration_ps: 10000 + args { key: "label" value: "E1.2.1" } + args { key: "extra" value: "extra info" } + } + trace_events { + device_id: 2 + resource_id: 2 + name: "E2.2.1" + timestamp_ps: 105000 + } + """ ), dict( displayTimeUnit="ns", diff --git a/tensorboard/plugins/scalar/scalars_plugin.py b/tensorboard/plugins/scalar/scalars_plugin.py index 4004abbaed..2564ab4273 100644 --- a/tensorboard/plugins/scalar/scalars_plugin.py +++ b/tensorboard/plugins/scalar/scalars_plugin.py @@ -84,12 +84,12 @@ def is_active(self): db = self._db_connection_provider() cursor = db.execute( """ - SELECT - 1 - FROM Tags - WHERE Tags.plugin_name = ? - LIMIT 1 - """, + SELECT + 1 + FROM Tags + WHERE Tags.plugin_name = ? + LIMIT 1 + """, (metadata.PLUGIN_NAME,), ) return bool(list(cursor)) @@ -128,16 +128,16 @@ def index_impl(self, experiment=None): db = self._db_connection_provider() cursor = db.execute( """ - SELECT - Tags.tag_name, - Tags.display_name, - Runs.run_name - FROM Tags - JOIN Runs - ON Tags.run_id = Runs.run_id - WHERE - Tags.plugin_name = ? - """, + SELECT + Tags.tag_name, + Tags.display_name, + Runs.run_name + FROM Tags + JOIN Runs + ON Tags.run_id = Runs.run_id + WHERE + Tags.plugin_name = ? + """, (metadata.PLUGIN_NAME,), ) result = collections.defaultdict(dict) @@ -193,27 +193,27 @@ def scalars_impl(self, tag, run, experiment, output_format): # placeholder rows en masse. The check for step filters out those rows. cursor = db.execute( """ - SELECT - Tensors.step, - Tensors.computed_time, - Tensors.data, - Tensors.dtype - FROM Tensors - JOIN Tags - ON Tensors.series = Tags.tag_id - JOIN Runs - ON Tags.run_id = Runs.run_id - WHERE - /* For backwards compatibility, ignore the experiment id - for matching purposes if it is empty. 
*/ - (:exp == '' OR Runs.experiment_id == CAST(:exp AS INT)) - AND Runs.run_name = :run - AND Tags.tag_name = :tag - AND Tags.plugin_name = :plugin - AND Tensors.shape = '' - AND Tensors.step > -1 - ORDER BY Tensors.step - """, + SELECT + Tensors.step, + Tensors.computed_time, + Tensors.data, + Tensors.dtype + FROM Tensors + JOIN Tags + ON Tensors.series = Tags.tag_id + JOIN Runs + ON Tags.run_id = Runs.run_id + WHERE + /* For backwards compatibility, ignore the experiment id + for matching purposes if it is empty. */ + (:exp == '' OR Runs.experiment_id == CAST(:exp AS INT)) + AND Runs.run_name = :run + AND Tags.tag_name = :tag + AND Tags.plugin_name = :plugin + AND Tensors.shape = '' + AND Tensors.step > -1 + ORDER BY Tensors.step + """, dict( exp=experiment, run=run, diff --git a/tensorboard/plugins/text/text_plugin_test.py b/tensorboard/plugins/text/text_plugin_test.py index 52c744ca71..5f77dbb1b2 100644 --- a/tensorboard/plugins/text/text_plugin_test.py +++ b/tensorboard/plugins/text/text_plugin_test.py @@ -110,22 +110,23 @@ def testText(self): table, textwrap.dedent( """\ -one |
-<tbody>
-<tr>
-<td><p>one</p></td>
-<td><p>two</p></td>
-</tr>
-<tr>
-<td><p>three</p></td>
-<td><p>four</p></td>
-</tr>
-</tbody>
-</table>"""
+                <table>
+                <tbody>
+                <tr>
+                <td><p>one</p></td>
+                <td><p>two</p></td>
+                </tr>
+                <tr>
+                <td><p>three</p></td>
+                <td><p>four</p></td>
+                </tr>
+                </tbody>
+                </table>
+                """.rstrip()
             ),
         )
@@ -134,103 +135,103 @@ def testTableGeneration(self):
         array2d = np.array([["one", "two"], ["three", "four"]])
         expected_table = textwrap.dedent(
             """\
-<table>
-<tbody>
-<tr>
-<td>one</td>
-<td>two</td>
-</tr>
-<tr>
-<td>three</td>
-<td>four</td>
-</tr>
-</tbody>
-</table>
-"""
+            <table>
+            <tbody>
+            <tr>
+            <td>one</td>
+            <td>two</td>
+            </tr>
+            <tr>
+            <td>three</td>
+            <td>four</td>
+            </tr>
+            </tbody>
+            </table>
+            """
         )
         self.assertEqual(text_plugin.make_table(array2d), expected_table)

         expected_table_with_headers = textwrap.dedent(
             """\
-<table>
-<thead>
-<tr>
-<th>c1</th>
-<th>c2</th>
-</tr>
-</thead>
-<tbody>
-<tr>
-<td>one</td>
-<td>two</td>
-</tr>
-<tr>
-<td>three</td>
-<td>four</td>
-</tr>
-</tbody>
-</table>
-"""
+            <table>
+            <thead>
+            <tr>
+            <th>c1</th>
+            <th>c2</th>
+            </tr>
+            </thead>
+            <tbody>
+            <tr>
+            <td>one</td>
+            <td>two</td>
+            </tr>
+            <tr>
+            <td>three</td>
+            <td>four</td>
+            </tr>
+            </tbody>
+            </table>
+            """
         )
         self.assertEqual(
             text_plugin.make_table(array2d, headers=["c1", "c2"]),
             expected_table_with_headers,
         )

         array_1d = np.array(["one", "two", "three", "four", "five"])
         expected_table_1d = textwrap.dedent(
             """\
-<table>
-<tbody>
-<tr>
-<td>one</td>
-</tr>
-<tr>
-<td>two</td>
-</tr>
-<tr>
-<td>three</td>
-</tr>
-<tr>
-<td>four</td>
-</tr>
-<tr>
-<td>five</td>
-</tr>
-</tbody>
-</table>
-"""
+            <table>
+            <tbody>
+            <tr>
+            <td>one</td>
+            </tr>
+            <tr>
+            <td>two</td>
+            </tr>
+            <tr>
+            <td>three</td>
+            </tr>
+            <tr>
+            <td>four</td>
+            </tr>
+            <tr>
+            <td>five</td>
+            </tr>
+            </tbody>
+            </table>
+            """
         )
         self.assertEqual(text_plugin.make_table(array_1d), expected_table_1d)

         expected_table_1d_with_headers = textwrap.dedent(
             """\
-<table>
-<thead>
-<tr>
-<th>X</th>
-</tr>
-</thead>
-<tbody>
-<tr>
-<td>one</td>
-</tr>
-<tr>
-<td>two</td>
-</tr>
-<tr>
-<td>three</td>
-</tr>
-<tr>
-<td>four</td>
-</tr>
-<tr>
-<td>five</td>
-</tr>
-</tbody>
-</table>
-"""
+            <table>
+            <thead>
+            <tr>
+            <th>X</th>
+            </tr>
+            </thead>
+            <tbody>
+            <tr>
+            <td>one</td>
+            </tr>
+            <tr>
+            <td>two</td>
+            </tr>
+            <tr>
+            <td>three</td>
+            </tr>
+            <tr>
+            <td>four</td>
+            </tr>
+            <tr>
+            <td>five</td>
+            </tr>
+            </tbody>
+            </table>
+            """
         )
         self.assertEqual(
             text_plugin.make_table(array_1d, headers=["X"]),
             expected_table_1d_with_headers,
         )
@@ -249,59 +250,59 @@ def testTextArrayToHTML(self):
         vector = np.array(["foo", "bar"])
         vector_expected = textwrap.dedent(
             """\
-<table>
-<tbody>
-<tr>
-<td><p>foo</p></td>
-</tr>
-<tr>
-<td><p>bar</p></td>
-</tr>
-</tbody>
-</table>
-"""
+            <table>
+            <tbody>
+            <tr>
+            <td><p>foo</p></td>
+            </tr>
+            <tr>
+            <td><p>bar</p></td>
+            </tr>
+            </tbody>
+            </table>
+            """
         )
         self.assertEqual(convert(vector), vector_expected)

         d2 = np.array([["foo", "bar"], ["zoink", "zod"]])
         d2_expected = textwrap.dedent(
             """\
-<table>
-<tbody>
-<tr>
-<td><p>foo</p></td>
-<td><p>bar</p></td>
-</tr>
-<tr>
-<td><p>zoink</p></td>
-<td><p>zod</p></td>
-</tr>
-</tbody>
-</table>
-"""
+            <table>
+            <tbody>
+            <tr>
+            <td><p>foo</p></td>
+            <td><p>bar</p></td>
+            </tr>
+            <tr>
+            <td><p>zoink</p></td>
+            <td><p>zod</p></td>
+            </tr>
+            </tbody>
+            </table>
+            """
         )
         self.assertEqual(convert(d2), d2_expected)

         d3 = np.array(
             [[["foo", "bar"], ["zoink", "zod"]], [["FOO", "BAR"], ["ZOINK", "ZOD"]]]
         )
         warning = plugin_util.markdown_to_safe_html(
             text_plugin.WARNING_TEMPLATE % 3
         )
         d3_expected = warning + textwrap.dedent(
             """\
-<table>
-<tbody>
-<tr>
-<td><p>foo</p></td>
-<td><p>bar</p></td>
-</tr>
-<tr>
-<td><p>zoink</p></td>
-<td><p>zod</p></td>
-</tr>
-</tbody>
-</table>
-"""
+            <table>
+            <tbody>
+            <tr>
+            <td><p>foo</p></td>
+            <td><p>bar</p></td>
+            </tr>
+            <tr>
+            <td><p>zoink</p></td>
+            <td><p>zod</p></td>
+            </tr>
+            </tbody>
+            </table>
+            """
         )
         self.assertEqual(convert(d3), d3_expected)
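
Note on why these hunks are behavior-preserving (a reviewer aside, not part of the patch): every change above only re-indents the body of a triple-quoted string, and each consumer of those strings tolerates the extra leading whitespace. textwrap.dedent() strips whatever common indentation the body carries, proto text parsers such as text_format.Merge() tokenize their input regardless of indentation, and SQLite does the same for SQL. A minimal sketch of the dedent and SQL cases, assuming only stdlib behavior; the names old/new/db are illustrative and do not appear in the patch:

    import sqlite3
    import textwrap

    # Old style: string body starts at the left margin.
    old = textwrap.dedent(
        """\
    SELECT
      1
    """
    )
    # New style: body indented to match the surrounding code.
    new = textwrap.dedent(
        """\
        SELECT
          1
        """
    )
    assert old == new  # dedent strips the common leading whitespace

    # SQLite is likewise whitespace-insensitive, so the re-indented queries
    # passed to db.execute() without a dedent() wrapper parse identically.
    db = sqlite3.connect(":memory:")
    assert db.execute("    SELECT 1").fetchone() == (1,)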