From f6262d937e29a8859a85c9ad67daa5ba81d22723 Mon Sep 17 00:00:00 2001 From: E Date: Mon, 14 Sep 2020 18:41:22 -0700 Subject: [PATCH 1/4] [oss-timeseries] migrate metrics backend --- tensorboard/plugins/metrics/BUILD | 58 ++ tensorboard/plugins/metrics/http_api.md | 324 ++++++++ tensorboard/plugins/metrics/metadata.py | 22 + tensorboard/plugins/metrics/metrics_loader.py | 36 + tensorboard/plugins/metrics/metrics_plugin.py | 576 +++++++++++++++ .../plugins/metrics/metrics_plugin_test.py | 695 ++++++++++++++++++ 6 files changed, 1711 insertions(+) create mode 100644 tensorboard/plugins/metrics/BUILD create mode 100644 tensorboard/plugins/metrics/http_api.md create mode 100644 tensorboard/plugins/metrics/metadata.py create mode 100644 tensorboard/plugins/metrics/metrics_loader.py create mode 100644 tensorboard/plugins/metrics/metrics_plugin.py create mode 100644 tensorboard/plugins/metrics/metrics_plugin_test.py diff --git a/tensorboard/plugins/metrics/BUILD b/tensorboard/plugins/metrics/BUILD new file mode 100644 index 0000000000..b24ffc108a --- /dev/null +++ b/tensorboard/plugins/metrics/BUILD @@ -0,0 +1,58 @@ +# Description: +# TensorBoard plugin for metrics (scalars, images, histograms, distributions) + +package(default_visibility = ["//tensorboard:internal"]) + +licenses(["notice"]) + +exports_files(["LICENSE"]) + +py_library( + name = "metrics_plugin", + srcs = [ + "metadata.py", + "metrics_plugin.py", + ], + srcs_version = "PY3", + deps = [ + "//tensorboard:errors", + "//tensorboard:plugin_util", + "//tensorboard/backend:http_util", + "//tensorboard/data:provider", + "//tensorboard/plugins:base_plugin", + "//tensorboard/plugins/histogram:metadata", + "//tensorboard/plugins/image:metadata", + "//tensorboard/plugins/scalar:metadata", + "@org_pocoo_werkzeug", + ], +) + +py_library( + name = "metadata", + srcs = ["metadata.py"], + srcs_version = "PY3", +) + +py_test( + name = "metrics_plugin_test", + size = "medium", + srcs = ["metrics_plugin_test.py"], + main = 
"metrics_plugin_test.py", + python_version = "PY3", + srcs_version = "PY3", + deps = [ + ":metrics_plugin", + "//tensorboard:context", + "//tensorboard:expect_tensorflow_installed", + "//tensorboard/backend:application", + "//tensorboard/backend/event_processing:data_provider", + "//tensorboard/backend/event_processing:event_accumulator", + "//tensorboard/backend/event_processing:event_multiplexer", + "//tensorboard/data:provider", + "//tensorboard/plugins:base_plugin", + "//tensorboard/plugins/image:metadata", + "//tensorboard/summary:tf_summary", + "//tensorboard/util:test_util", + "@org_pocoo_werkzeug", + ], +) diff --git a/tensorboard/plugins/metrics/http_api.md b/tensorboard/plugins/metrics/http_api.md new file mode 100644 index 0000000000..bac8814337 --- /dev/null +++ b/tensorboard/plugins/metrics/http_api.md @@ -0,0 +1,324 @@ +# Metrics plugin HTTP API + +This backend exposes summary data related to "metrics". This includes Scalar, +Histogram, Image data. + + +### Type `RunToTags` +Type: {[run: string]: string[]} + +Map from run name to a list of tag names. + +### Type `TagToDescription` +Type: {[tag: string]: string} + +Map from tag name to a description string. + +### Type `NonSampledTagMetadata` +Type: Object + +Metadata for tags associated with a non-sampled type plugin. + +Properties: + - runTagInfo: RunToTags + - tagDescriptions: TagToDescription + +### Type `SampledTagMetadata` +Type: Object + +Metadata for tags associated with a sampled type plugin. + +Properties: + - tagDescriptions: TagToDescription + - tagRunSampledInfo: TagToRunSampledInfo + +### Type `SampledTimeSeriesInfo` +Type: Object + +Metadata associated with a time series generated from a sampled plugin. + +Properties: + - maxSamplesPerStep: number + - The maximum datum count at any step in the time series. Note that the + actual number of samples may differ at each step. 
+ +### Type `TagToRunSampledInfo` +Type: {[tag: string]: {[run: string]: SampledTimeSeriesInfo}} + +Map from tag name to a map from run name to sampled time series info. + +### Type `PluginType` +Type: string enum + - SCALARS: 'scalars' + - HISTOGRAMS: 'histograms' + - IMAGES: 'images' + +### Type `SingleRunPlugin` +Type: PluginType + +Plugins of this type require a single run to be specified when requesting +time series data. Non-single-run plugins are not required to specify a run. + +### Type `SampledPlugin` +Type: PluginType + +Plugins of this type are associated with sampled time series. Sampled time +series may contain multiple samples of data at each step. + +### Type `TagMetadata` +Type: Object + +Properties: + - `[PluginType.SCALARS]`: NonSampledTagMetadata + - `[PluginType.HISTOGRAMS]`: NonSampledTagMetadata + - `[PluginType.IMAGES]`: SampledTagMetadata + +### Type `TimeSeriesRequest` +Type: Object + +Request for time series data, which may correspond to at most one +TimeSeriesResponse in a successful case. Backends may handle requests +differently depending on the plugin, or ignore certain plugins completely. +In the future, this may be extended with options for filtering and sampling. + +Properties: + - plugin: PluginType + - tag: string + - run: optional string + - The name of a requested run, required when plugin is a `SingleRunPlugin`. + - sample: optional number + - The zero-indexed sample, required when plugin is a `SampledPlugin`. + +### Type `RunToSeries` +Type: {[run: string]: ScalarStepDatum[]}| + {[run: string]: HistogramStepDatum[]}| + {[run: string]: ImageStepDatum[]} + +Map from run name to a list time series data sorted by step. + +### Type `TimeSeriesSuccessfulResponse` +Type: Object + +Response from the backend containing time series data for a TimeSeriesRequest. +The value of `plugin` determines the type of values in the `runToSeries` dict. +For example, if plugin is `scalars`, then series will be a list of +`ScalarStepDatum`. 
+ +Properties: + - plugin: PluginType + - tag: string + - run: optional string + - The name of a requested run, required when plugin is a `SingleRunPlugin`. + - sample: optional number + - The zero-indexed sample, required when plugin is a `SampledPlugin`. + - runToSeries: RunToSeries + +### Type `TimeSeriesFailedResponse` +Type: Object + +Response from the backend for a TimeSeriesRequest that failed to get data. + +Properties: + - plugin: PluginType + - tag: string + - run: optional string + - The name of a requested run, required when plugin is a `SingleRunPlugin`. + - sample: optional number + - The zero-indexed sample, required when plugin is a `SampledPlugin`. + - error: string + - The error reason. + +### Type `TimeSeriesResponse` +Type: TimeSeriesSuccessfulResponse|TimeSeriesFailedResponse + +Response from the backend containing time series data for a TimeSeriesRequest. + +### Type `ScalarStepDatum` +Type: Object + +Datum for a single step in a scalar time series. + +Properties: + - step: number + - The global step at which this datum occurred; an integer. This is a unique + key among data of this time series. + - wallTime: number + - The real-world time at which this datum occurred, as float seconds since + epoch. + - value: number + - The scalar value for this datum; a float. + +### Type `HistogramBin` +Type: Object + +Single bin in a histogram, describing the number of items in a value range. + +Properties + - min: number + - The smaller value of the bin's range. + - max: number + - The larger value of the bin's range. + - count: number + - The integer number of items in the bin. + +### Type `HistogramStepDatum` +Type: Object + +Datum for a single step in a histogram time series. + +Properties: + - step: number + - The global step at which this datum occurred; an integer. This is a unique + key among data of this time series. + - wallTime: number + - The real-world time at which this datum occurred, as float seconds since + epoch. 
+ - bins: HistogramBin[] + - The histogram contents, as a list of HistogramBins. Bins must be sorted + by increasing 'min' value, and ranges must not overlap. + +### Type `ImageStepDatum` +Type: Object + +Datum for a single run+tag+sample+step in a image time series. This does not +contain actual image contents. See `ImageData` for contents of a single image. + +Properties: + - step: number + - The global step at which this datum occurred; an integer. This is a unique + key among data of this time series. + - wallTime: number + - The real-world time at which this datum occurred, as float seconds since + epoch. + - imageId: ImageId + - A unique id for the image data. + +### Type `ImageData` +Type: string + +A bytestring of raw image bytes. + +### Type `ImageId` +Type: string + +A unique reference to identify a single image. + +### Route `/data/plugin/timeseries/tags` + +Returns tag metadata for a given experiment's logged metrics. Tag descriptions +may be produced by combining several descriptions for the same tag across +multiple runs. + +Args: + - experiment_id: optional string + - ID of the request's experiment. + +Returns: + - TagMetadata + +Example: + + Response: + { + "histograms": { + "runTagInfo": { + "test_run": ["ages"] + }, + "tagDescriptions": { + "ages": "

a distribution of Walrus ages

" + }, + }, + "images": { + "tagDescriptions": { + "images/tagA": "

Initial digits

", + "images/tagB": "

Reshaped digits

", + }, + "tagRunSampledInfo": { + "images/tagA": { + "run1": {"samples": 1} + }, + "images/tagB": { + "run1": {"samples": 2}, + "run2": {"samples": 3}, + }, + }, + }, + "scalars": { + "runTagInfo": {"test_run": ["eval/population"]}, + "tagDescriptions": { + "eval/population": "

the most valuable statistic

" + }, + }, + } + +### Route `/data/plugin/timeseries/timeSeries` (POST) + +Responds to a list of requests for time series data. A list of requests may +cover multiple tags across multiple runs with different with data produced by +different plugins. Responses may be in any order. +Clients may wish to call this using tag names returned from a calling /tags. + +Args: + - experiment_id: string + - string ID of the request's experiment. + - requests: TimeSeriesRequest[] + +Returns: + - TimeSeriesResponse[] + +Example: + + Arguments: + { + requests: [ + {"plugin": "scalars", "tag": "eval/population"}, + {"plugin": "histograms", "tag": "ages"}, + {"plugin": "images", "tag": "faces", "sample": 2}, + ] + } + + Response: + [ + { + "plugin": "scalars" + "tag": "eval/population" + "runToSeries": { + "run1": [ + {wallTime: 1550634693, step: 100, value: 7}, + {wallTime: 1550634899, step: 200, value: 8}, + ] + }, + { + "plugin": "histograms" + "tag": "population" + "runToSeries": { + "run1": [ + { + wallTime: 1550634693, + step: 100, + value: [[0, 0.5, 9], [1, 0.5, 10], [10, 0.5, 10], ...]}, + ] + }, + { + "plugin": "images" + "tag": "faces" + "sample": 2, + "runToSeries": { + "run1": [ + {wallTime: 1550634693, step: 100, imageId: "..."}, + {wallTime: 1550634899, step: 200, imageId: "..."}, + ], + } + }, + ] + +### Route `/data/plugin/timeseries/imageData` + +Returns an image's data. Instead of reading the raw data, clients may rely +on this endpoint URL as an HTMLImageElement's 'src' attribute. + +Args: + - imageId: ImageId + +Returns: + - Image data diff --git a/tensorboard/plugins/metrics/metadata.py b/tensorboard/plugins/metrics/metadata.py new file mode 100644 index 0000000000..44b767d311 --- /dev/null +++ b/tensorboard/plugins/metrics/metadata.py @@ -0,0 +1,22 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Internal information about the metrics plugin.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +PLUGIN_NAME = "timeseries" diff --git a/tensorboard/plugins/metrics/metrics_loader.py b/tensorboard/plugins/metrics/metrics_loader.py new file mode 100644 index 0000000000..742e82b982 --- /dev/null +++ b/tensorboard/plugins/metrics/metrics_loader.py @@ -0,0 +1,36 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""The TensorBoard metrics plugin loader.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensorboard.plugins import base_plugin +from tensorboard.plugins import metrics_plugin + + +class MetricsLoader(base_plugin.TBLoader): + """The loader for MetricsPlugin.""" + + def load(self, context): + """Loads or skips the plugin during setup phase. + + Args: + context: The TBContext instance. + """ + if not context._data_provider: + return None + return metrics_plugin.MetricsPlugin(context) diff --git a/tensorboard/plugins/metrics/metrics_plugin.py b/tensorboard/plugins/metrics/metrics_plugin.py new file mode 100644 index 0000000000..f952f9f4d4 --- /dev/null +++ b/tensorboard/plugins/metrics/metrics_plugin.py @@ -0,0 +1,576 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""The TensorBoard metrics plugin.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import imghdr +import json + +from werkzeug import wrappers + +from tensorboard import errors +from tensorboard import plugin_util +from tensorboard.backend import http_util +from tensorboard.data import provider +from tensorboard.plugins import base_plugin +from tensorboard.plugins.histogram import ( + metadata as histogram_metadata,) +from tensorboard.plugins.image import ( + metadata as image_metadata,) +from tensorboard.plugins.metrics import metadata +from tensorboard.plugins.scalar import ( + metadata as scalar_metadata,) + + +_IMGHDR_TO_MIMETYPE = { + "bmp": "image/bmp", + "gif": "image/gif", + "jpeg": "image/jpeg", + "png": "image/png", + "svg": "image/svg+xml", +} + +_DEFAULT_IMAGE_MIMETYPE = "application/octet-stream" + +_SINGLE_RUN_PLUGINS = frozenset([ + histogram_metadata.PLUGIN_NAME, image_metadata.PLUGIN_NAME +]) + +_SAMPLED_PLUGINS = frozenset([image_metadata.PLUGIN_NAME]) + + +def _get_tag_description_info(mapping): + """Gets maps from tags to descriptions, and descriptions to runs. + + Args: + mapping: a nested map `d` such that `d[run][tag]` is a time series + produced by DataProvider's `list_*` methods. + + Returns: + A tuple containing + tag_to_descriptions: A map from tag strings to a set of description + strings. + description_to_runs: A map from description strings to a set of run + strings. 
+ """ + tag_to_descriptions = collections.defaultdict(set) + description_to_runs = collections.defaultdict(set) + for (run, tag_to_content) in mapping.items(): + for (tag, metadatum) in tag_to_content.items(): + description = metadatum.description + if len(description): + tag_to_descriptions[tag].add(description) + description_to_runs[description].add(run) + + return tag_to_descriptions, description_to_runs + + +def _build_combined_description(descriptions, description_to_runs): + """Creates a single description from a set of descriptions. + + Descriptions may be composites when a single tag has different descriptions + across multiple runs. + + Args: + descriptions: A list of description strings. + description_to_runs: A map from description strings to a set of run + strings. + + Returns: + The combined description string. + """ + prefixed_descriptions = [] + for description in descriptions: + runs = sorted(description_to_runs[description]) + run_or_runs = "runs" if len(runs) > 1 else "run" + run_header = "## For " + run_or_runs + ": " + ", ".join(runs) + description_html = run_header + "\n" + description + prefixed_descriptions.append(description_html) + + header = "# Multiple descriptions\n" + return header + "\n".join(prefixed_descriptions) + + +def _get_tag_to_description(mapping): + """Returns a map of tags to descriptions. + + Args: + mapping: a nested map `d` such that `d[run][tag]` is a time series + produced by DataProvider's `list_*` methods. + + Returns: + A map from tag strings to description HTML strings. E.g. + { + "loss": "

Multiple descriptions

For runs: test, train +

...

", + "loss2": "

The lossy details

", + } + """ + tag_to_descriptions, description_to_runs = _get_tag_description_info(mapping) + + result = {} + for tag in tag_to_descriptions: + descriptions = sorted(tag_to_descriptions[tag]) + if len(descriptions) == 1: + description = descriptions[0] + else: + description = _build_combined_description(descriptions, description_to_runs) + result[tag] = plugin_util.markdown_to_safe_html(description) + + return result + + +def _get_run_tag_info(mapping): + """Returns a map of run names to a list of tag names. + + Args: + mapping: a nested map `d` such that `d[run][tag]` is a time series + produced by DataProvider's `list_*` methods. + + Returns: + A map from run strings to a list of tag strings. E.g. + {"loss001a": ["actor/loss", "critic/loss"], ...} + """ + return {run: sorted(mapping[run]) for run in mapping} + + +def _format_basic_mapping(mapping): + """Prepares a scalar or histogram mapping for client consumption. + + Args: + mapping: a nested map `d` such that `d[run][tag]` is a time series + produced by DataProvider's `list_*` methods. + + Returns: + A dict with the following fields: + runTagInfo: the return type of `_get_run_tag_info` + tagDescriptions: the return type of `_get_tag_to_description` + """ + return { + "runTagInfo": _get_run_tag_info(mapping), + "tagDescriptions": _get_tag_to_description(mapping), + } + + +def _format_image_blob_sequence_datum(sorted_datum_list, sample): + """Formats image metadata from a list of BlobSequenceDatum's for clients. + + This expects that frontend clients need to access images based on the + run+tag+sample. + + Args: + sorted_datum_list: a list of DataProvider's `BlobSequenceDatum`, sorted by + step. This can be produced via DataProvider's `read_blob_sequences`. + sample: zero-indexed integer for the requested sample. + + Returns: + A list of `ImageStepDatum` (see http_api.md). + """ + # For images, ignore the first 2 items of a BlobSequenceDatum's values, which + # correspond to width, height. 
+ index = sample + 2 + step_data = [] + for datum in sorted_datum_list: + if len(datum.values) <= index: + continue + + step_data.append({ + "step": datum.step, + "wallTime": datum.wall_time, + "imageId": datum.values[index].blob_key, + }) + return step_data + + +def _get_tag_run_image_info(mapping): + """Returns a map of tag names to run information. + + Args: + mapping: the result of DataProvider's `list_blob_sequences`. + + Returns: + A nested map from run strings to tag string to image info, where image + info is an object of form {"maxSamplesPerStep": num}. For example, + { + "reshaped": { + "test": {"maxSamplesPerStep": 1}, + "train": {"maxSamplesPerStep": 1} + }, + "convolved": {"test": {"maxSamplesPerStep": 50}}, + } + """ + tag_run_image_info = collections.defaultdict(dict) + for (run, tag_to_content) in mapping.items(): + for (tag, metadatum) in tag_to_content.items(): + tag_run_image_info[tag][run] = { + "maxSamplesPerStep": metadatum.max_length - 2 # width, height + } + return dict(tag_run_image_info) + + +def _format_image_mapping(mapping): + """Prepares an image mapping for client consumption. + + Args: + mapping: the result of DataProvider's `list_blob_sequences`. + + Returns: + A dict with the following fields: + tagRunSampledInfo: the return type of `_get_tag_run_image_info` + tagDescriptions: the return type of `_get_tag_description_info` + """ + return { + "tagDescriptions": _get_tag_to_description(mapping), + "tagRunSampledInfo": _get_tag_run_image_info(mapping), + } + + +class MetricsPlugin(base_plugin.TBPlugin): + """Metrics Plugin for TensorBoard.""" + + plugin_name = metadata.PLUGIN_NAME + + def __init__(self, context): + """Instantiates MetricsPlugin. + + Args: + context: A base_plugin.TBContext instance. MetricsLoader checks that + it contains a valid `data_provider`. 
+ """ + self._data_provider = context.data_provider + + # For histograms, use a round number + 1 since sampling includes both start + # and end steps, so N+1 samples corresponds to dividing the step sequence + # into N intervals. + sampling_hints = context.sampling_hints or {} + self._plugin_downsampling = { + "scalars": + sampling_hints.get(scalar_metadata.PLUGIN_NAME, 1000), + "histograms": + sampling_hints.get(histogram_metadata.PLUGIN_NAME, 51), + "images": + sampling_hints.get(image_metadata.PLUGIN_NAME, 10), + } + + def frontend_metadata(self): + return base_plugin.FrontendMetadata( + is_ng_component=True, tab_name="Time Series" + ) + + def get_plugin_apps(self): + return { + "/tags": self._serve_tags, + "/timeSeries": self._serve_time_series, + "/imageData": self._serve_image_data, + } + + def data_plugin_names(self): + return (scalar_metadata.PLUGIN_NAME, histogram_metadata.PLUGIN_NAME) + + def is_active(self): + return False # 'data_plugin_names' suffices. + + @wrappers.Request.application + def _serve_tags(self, request): + ctx = plugin_util.context(request.environ) + experiment = plugin_util.experiment_id(request.environ) + index = self._tags_impl(ctx, experiment=experiment) + return http_util.Respond(request, index, "application/json") + + def _tags_impl(self, ctx, experiment=None): + """Returns tag metadata for a given experiment's logged metrics. + + Args: + ctx: A `tensorboard.context.RequestContext` value. + experiment: optional string ID of the request's experiment. + + Returns: + A nested dict 'd' with keys in ("scalars", "histograms", "images") + and values being the return type of _format_*mapping. 
+ """ + scalar_mapping = self._data_provider.list_scalars( + ctx, + experiment_id=experiment, + plugin_name=scalar_metadata.PLUGIN_NAME, + ) + histogram_mapping = self._data_provider.list_tensors( + ctx, + experiment_id=experiment, + plugin_name=histogram_metadata.PLUGIN_NAME, + ) + image_mapping = self._data_provider.list_blob_sequences( + ctx, + experiment_id=experiment, + plugin_name=image_metadata.PLUGIN_NAME, + ) + + result = {} + result["scalars"] = _format_basic_mapping(scalar_mapping) + result["histograms"] = _format_basic_mapping(histogram_mapping) + result["images"] = _format_image_mapping(image_mapping) + return result + + @wrappers.Request.application + def _serve_time_series(self, request): + ctx = plugin_util.context(request.environ) + experiment = plugin_util.experiment_id(request.environ) + series_requests_string = request.form.get("requests") + if not series_requests_string: + raise errors.InvalidArgumentError("Missing 'requests' field") + try: + series_requests = json.loads(series_requests_string) + except ValueError: + raise errors.InvalidArgumentError("Unable to parse 'requests' as JSON") + + response = self._time_series_impl(ctx, experiment, series_requests) + return http_util.Respond(request, response, "application/json") + + def _time_series_impl(self, ctx, experiment, series_requests): + """Constructs a list of responses from a list of series requests. + + Args: + ctx: A `tensorboard.context.RequestContext` value. + experiment: string ID of the request's experiment. + series_requests: a list of `TimeSeriesRequest` dicts (see http_api.md). + + Returns: + A list of `TimeSeriesResponse` dicts (see http_api.md). 
+ """ + responses = [ + self._get_time_series(ctx, experiment, request) + for request in series_requests + ] + return responses + + def _create_base_response(self, series_request): + tag = series_request.get("tag") + run = series_request.get("run") + plugin = series_request.get("plugin") + sample = series_request.get("sample") + response = {"plugin": plugin, "tag": tag} + if isinstance(run, str): + response["run"] = run + if isinstance(sample, int): + response["sample"] = sample + + return response + + def _get_invalid_request_error(self, series_request): + tag = series_request.get("tag") + plugin = series_request.get("plugin") + run = series_request.get("run") + sample = series_request.get("sample") + + if not isinstance(tag, str): + return "Missing tag" + + if (plugin != scalar_metadata.PLUGIN_NAME and + plugin != histogram_metadata.PLUGIN_NAME and + plugin != image_metadata.PLUGIN_NAME): + return "Invalid plugin" + + if plugin in _SINGLE_RUN_PLUGINS and not isinstance(run, str): + return "Missing run" + + if plugin in _SAMPLED_PLUGINS and not isinstance(sample, int): + return "Missing sample" + + return None + + def _get_time_series(self, ctx, experiment, series_request): + """Returns time series data for a given tag, plugin. + + Args: + ctx: A `tensorboard.context.RequestContext` value. + experiment: string ID of the request's experiment. + series_request: a `TimeSeriesRequest` (see http_api.md). + + Returns: + A `TimeSeriesResponse` dict (see http_api.md). 
+ """ + tag = series_request.get("tag") + run = series_request.get("run") + plugin = series_request.get("plugin") + sample = series_request.get("sample") + response = self._create_base_response(series_request) + request_error = self._get_invalid_request_error(series_request) + if request_error: + response["error"] = request_error + return response + + runs = [run] if run else None + run_to_series = None + if plugin == scalar_metadata.PLUGIN_NAME: + run_to_series = self._get_run_to_scalar_series(ctx, experiment, tag, runs) + + if plugin == histogram_metadata.PLUGIN_NAME: + run_to_series = self._get_run_to_histogram_series( + ctx, experiment, tag, runs) + + if plugin == image_metadata.PLUGIN_NAME: + run_to_series = self._get_run_to_image_series( + ctx, experiment, tag, sample, runs) + + response["runToSeries"] = run_to_series + return response + + def _get_run_to_scalar_series(self, ctx, experiment, tag, runs): + """Builds a run-to-scalar-series dict for client consumption. + + Args: + ctx: A `tensorboard.context.RequestContext` value. + experiment: a string experiment id. + tag: string of the requested tag. + runs: optional list of run names as strings. + + Returns: + A map from string run names to `ScalarStepDatum` (see http_api.md). + """ + mapping = self._data_provider.read_scalars( + ctx, + experiment_id=experiment, + plugin_name=scalar_metadata.PLUGIN_NAME, + downsample=self._plugin_downsampling["scalars"], + run_tag_filter=provider.RunTagFilter(runs=runs, tags=[tag]), + ) + + run_to_series = {} + for (result_run, tag_data) in mapping.items(): + if tag not in tag_data: + continue + values = [{ + "wallTime": datum.wall_time, + "step": datum.step, + "value": datum.value + } for datum in tag_data[tag]] + run_to_series[result_run] = values + + return run_to_series + + + def _format_histogram_datum_bins(self, datum): + """Formats a histogram datum's bins for client consumption. + + Args: + datum: a DataProvider's TensorDatum. 
+ + Returns: + A list of `HistogramBin`s (see http_api.md). + """ + numpy_list = datum.numpy.tolist() + bins = [{"min": x[0], "max": x[1], "count": x[2]} for x in numpy_list] + return bins + + + def _get_run_to_histogram_series(self, ctx, experiment, tag, runs): + """Builds a run-to-histogram-series dict for client consumption. + + Args: + ctx: A `tensorboard.context.RequestContext` value. + experiment: a string experiment id. + tag: string of the requested tag. + runs: optional list of run names as strings. + + Returns: + A map from string run names to `HistogramStepDatum` (see http_api.md). + """ + mapping = self._data_provider.read_tensors( + ctx, + experiment_id=experiment, + plugin_name=histogram_metadata.PLUGIN_NAME, + downsample=self._plugin_downsampling["histograms"], + run_tag_filter=provider.RunTagFilter(runs=runs, tags=[tag]), + ) + + run_to_series = {} + for (result_run, tag_data) in mapping.items(): + if tag not in tag_data: + continue + values = [{ + "wallTime": datum.wall_time, + "step": datum.step, + "bins": self._format_histogram_datum_bins(datum) + } for datum in tag_data[tag]] + run_to_series[result_run] = values + + return run_to_series + + + def _get_run_to_image_series(self, ctx, experiment, tag, sample, runs): + """Builds a run-to-image-series dict for client consumption. + + Args: + ctx: A `tensorboard.context.RequestContext` value. + experiment: a string experiment id. + tag: string of the requested tag. + sample: zero-indexed integer for the requested sample. + runs: optional list of run names as strings. + + Returns: + A `RunToSeries` dict (see http_api.md). 
+ """ + mapping = self._data_provider.read_blob_sequences( + ctx, + experiment_id=experiment, + plugin_name=image_metadata.PLUGIN_NAME, + downsample=self._plugin_downsampling["images"], + run_tag_filter=provider.RunTagFilter(runs, tags=[tag]), + ) + + run_to_series = {} + for (result_run, tag_data) in mapping.items(): + if tag not in tag_data: + continue + blob_sequence_datum_list = tag_data[tag] + series = _format_image_blob_sequence_datum( + blob_sequence_datum_list, sample) + if series: + run_to_series[result_run] = series + + return run_to_series + + @wrappers.Request.application + def _serve_image_data(self, request): + """Serves an individual image.""" + ctx = plugin_util.context(request.environ) + blob_key = request.args["imageId"] + if not blob_key: + raise errors.InvalidArgumentError("Missing 'imageId' field") + + (data, content_type) = self._image_data_impl(ctx, blob_key) + return http_util.Respond(request, data, content_type) + + def _image_data_impl(self, ctx, blob_key): + """Gets the image data for a blob key. + + Args: + ctx: A `tensorboard.context.RequestContext` value. + blob_key: a string identifier for a DataProvider blob. + + Returns: + A tuple containing: + data: a raw bytestring of the requested image's contents. + content_type: a string HTTP content type. + """ + data = self._data_provider.read_blob(ctx, blob_key=blob_key) + image_type = imghdr.what(None, data) + content_type = _IMGHDR_TO_MIMETYPE.get( + image_type, _DEFAULT_IMAGE_MIMETYPE + ) + return (data, content_type) diff --git a/tensorboard/plugins/metrics/metrics_plugin_test.py b/tensorboard/plugins/metrics/metrics_plugin_test.py new file mode 100644 index 0000000000..22ae6c3011 --- /dev/null +++ b/tensorboard/plugins/metrics/metrics_plugin_test.py @@ -0,0 +1,695 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Integration tests for the Metrics Plugin.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import collections +import functools +import os.path +import unittest + +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf + +from tensorboard import context +from tensorboard.backend.event_processing import ( + data_provider,) +from tensorboard.backend.event_processing import ( + plugin_event_accumulator as event_accumulator,) +from tensorboard.backend.event_processing import ( + plugin_event_multiplexer as event_multiplexer,) +from tensorboard.data import provider +from tensorboard.plugins import base_plugin +from tensorboard.plugins.image import ( + metadata as image_metadata,) +from tensorboard.plugins.metrics import ( + metrics_plugin,) +from tensorboard.util import test_util + +tf1.enable_eager_execution() + + +class MetricsPluginTest(tf.test.TestCase): + + def setUp(self): + super(MetricsPluginTest, self).setUp() + self._logdir = self.get_temp_dir() + self._multiplexer = event_multiplexer.EventMultiplexer() + + flags = argparse.Namespace(generic_data="true") + provider = data_provider.MultiplexerDataProvider(self._multiplexer, + self._logdir) + ctx = base_plugin.TBContext( + flags=flags, + logdir=self._logdir, + multiplexer=self._multiplexer, + data_provider=provider, + ) + self._plugin = metrics_plugin.MetricsPlugin(ctx) + + ### Writing utilities. 
+ + def _write_scalar(self, run, tag, description=None): + subdir = os.path.join(self._logdir, run) + writer = tf.summary.create_file_writer(subdir) + + with writer.as_default(): + tf.summary.scalar(tag, 42, step=0, description=description) + writer.flush() + self._multiplexer.AddRunsFromDirectory(self._logdir) + + def _write_scalar_data(self, run, tag, data=[]): + """Writes scalar data, starting at step 0. + + Args: + run: string run name. + tag: string tag name. + data: list of scalar values to write at each step. + """ + subdir = os.path.join(self._logdir, run) + writer = tf.summary.create_file_writer(subdir) + + with writer.as_default(): + step = 0 + for datum in data: + tf.summary.scalar(tag, datum, step=step) + step += 1 + writer.flush() + self._multiplexer.AddRunsFromDirectory(self._logdir) + + def _write_histogram(self, run, tag, description=None): + subdir = os.path.join(self._logdir, run) + writer = tf.summary.create_file_writer(subdir) + + with writer.as_default(): + data = tf.random.normal(shape=[3]) + tf.summary.histogram(tag, data, step=0, description=description) + writer.flush() + self._multiplexer.AddRunsFromDirectory(self._logdir) + + def _write_histogram_data(self, run, tag, data=[]): + """Writes histogram data, starting at step 0. + + Args: + run: string run name. + tag: string tag name. + data: list of histogram values to write at each step. 
+ """ + subdir = os.path.join(self._logdir, run) + writer = tf.summary.create_file_writer(subdir) + + with writer.as_default(): + step = 0 + for datum in data: + tf.summary.histogram(tag, datum, step=step) + step += 1 + writer.flush() + self._multiplexer.AddRunsFromDirectory(self._logdir) + + def _write_image(self, run, tag, samples=2, description=None): + subdir = os.path.join(self._logdir, run) + writer = tf.summary.create_file_writer(subdir) + + with writer.as_default(): + data = tf.random.normal(shape=[samples, 8, 8, 1]) + tf.summary.image( + tag, data, step=0, max_outputs=samples, description=description) + writer.flush() + self._multiplexer.AddRunsFromDirectory(self._logdir) + + ### Misc utilities. + + def _clean_time_series_responses(self, responses): + """Cleans non-deterministic data from a TimeSeriesResponse, in place.""" + for response in responses: + run_to_series = response.get("runToSeries", {}) + for (run, series) in run_to_series.items(): + for datum in series: + if "wallTime" in datum: + datum["wallTime"] = "" + + # Clean images. + run_to_image_series = response.get("runToSeries", {}) + for (run, series) in run_to_image_series.items(): + for datum in series: + if "wallTime" in datum: + datum["wallTime"] = "" + if "imageId" in datum: + datum["imageId"] = "" + + return responses + + def _get_image_blob_key(self, run, tag, step=0, sample=0): + """Returns a single image's blob_key after it has been written.""" + mapping = self._plugin._data_provider.read_blob_sequences( + context.RequestContext(), + experiment_id="expid", + plugin_name=image_metadata.PLUGIN_NAME, + downsample=10, + run_tag_filter=provider.RunTagFilter(tags=[tag]), + ) + blob_sequence_datum = mapping[run][tag][step] + # For images, the first 2 datum values are ignored. + return blob_sequence_datum.values[2 + sample].blob_key + + + ### Actual tests. 
+ + def test_routes_provided(self): + """Tests that the plugin offers the correct routes.""" + routes = self._plugin.get_plugin_apps() + self.assertIsInstance(routes["/tags"], collections.Callable) + + def test_tags_empty(self): + response = self._plugin._tags_impl(context.RequestContext(), "eid") + + expected_tags = { + "runTagInfo": {}, + "tagDescriptions": {}, + } + self.assertEqual(expected_tags, response["scalars"]) + self.assertEqual(expected_tags, response["histograms"]) + self.assertEqual({ + "tagDescriptions": {}, + "tagRunSampledInfo": {}, + }, response["images"]) + + def test_tags(self): + self._write_scalar("run1", "scalars/tagA", None) + self._write_scalar("run1", "scalars/tagA", None) + self._write_scalar("run1", "scalars/tagB", None) + self._write_scalar("run2", "scalars/tagB", None) + self._write_histogram("run1", "histograms/tagA", None) + self._write_histogram("run1", "histograms/tagA", None) + self._write_histogram("run1", "histograms/tagB", None) + self._write_histogram("run2", "histograms/tagB", None) + self._write_image("run1", "images/tagA", 1, None) + self._write_image("run1", "images/tagA", 2, None) + self._write_image("run1", "images/tagB", 3, None) + self._write_image("run2", "images/tagB", 4, None) + + self._multiplexer.Reload() + + response = self._plugin._tags_impl(context.RequestContext(), "eid") + + self.assertEqual( + { + "runTagInfo": { + "run1": ["scalars/tagA", "scalars/tagB"], + "run2": ["scalars/tagB"], + }, + "tagDescriptions": {}, + }, + response["scalars"], + ) + self.assertEqual( + { + "runTagInfo": { + "run1": ["histograms/tagA", "histograms/tagB"], + "run2": ["histograms/tagB"], + }, + "tagDescriptions": {}, + }, + response["histograms"], + ) + self.assertEqual( + { + "tagDescriptions": {}, + "tagRunSampledInfo": { + "images/tagA": { + "run1": { + "maxSamplesPerStep": 2 + } + }, + "images/tagB": { + "run1": { + "maxSamplesPerStep": 3 + }, + "run2": { + "maxSamplesPerStep": 4 + }, + }, + }, + }, + response["images"], + ) + 
+ def test_tags_with_descriptions(self): + self._write_scalar("run1", "scalars/tagA", "Describing tagA") + self._write_scalar("run1", "scalars/tagB", "Describing tagB") + self._write_scalar("run2", "scalars/tagB", "Describing tagB") + self._write_histogram("run1", "histograms/tagA", "Describing tagA") + self._write_histogram("run1", "histograms/tagB", "Describing tagB") + self._write_histogram("run2", "histograms/tagB", "Describing tagB") + self._write_image("run1", "images/tagA", 1, "Describing tagA") + self._write_image("run1", "images/tagB", 2, "Describing tagB") + self._write_image("run2", "images/tagB", 3, "Describing tagB") + self._multiplexer.Reload() + + response = self._plugin._tags_impl(context.RequestContext(), "eid") + + self.assertEqual( + { + "runTagInfo": { + "run1": ["scalars/tagA", "scalars/tagB"], + "run2": ["scalars/tagB"], + }, + "tagDescriptions": { + "scalars/tagA": "

<p>Describing tagA</p>

", + "scalars/tagB": "

<p>Describing tagB</p>

", + }, + }, + response["scalars"], + ) + self.assertEqual( + { + "runTagInfo": { + "run1": ["histograms/tagA", "histograms/tagB"], + "run2": ["histograms/tagB"], + }, + "tagDescriptions": { + "histograms/tagA": "

<p>Describing tagA</p>

", + "histograms/tagB": "

<p>Describing tagB</p>

", + }, + }, + response["histograms"], + ) + self.assertEqual( + { + "tagDescriptions": { + "images/tagA": "

<p>Describing tagA</p>

", + "images/tagB": "

<p>Describing tagB</p>

", + }, + "tagRunSampledInfo": { + "images/tagA": { + "run1": { + "maxSamplesPerStep": 1 + } + }, + "images/tagB": { + "run1": { + "maxSamplesPerStep": 2 + }, + "run2": { + "maxSamplesPerStep": 3 + }, + }, + }, + }, + response["images"], + ) + + def test_tags_conflicting_description(self): + self._write_scalar("run1", "scalars/tagA", None) + self._write_scalar("run2", "scalars/tagA", "tagA is hot") + self._write_scalar("run3", "scalars/tagA", "tagA is cold") + self._write_scalar("run4", "scalars/tagA", "tagA is cold") + self._write_histogram("run1", "histograms/tagA", None) + self._write_histogram("run2", "histograms/tagA", "tagA is hot") + self._write_histogram("run3", "histograms/tagA", "tagA is cold") + self._write_histogram("run4", "histograms/tagA", "tagA is cold") + self._multiplexer.Reload() + + response = self._plugin._tags_impl(context.RequestContext(), "eid") + + expected_composite_description = ("

<h1>Multiple descriptions</h1>

\n" + "

<h2>For runs: run3, run4</h2>

\n" + "

<p>tagA is cold</p>

\n" + "

<h2>For run: run2</h2>

\n" + "

<p>tagA is hot</p>

") + self.assertEqual( + {"scalars/tagA": expected_composite_description}, + response["scalars"]["tagDescriptions"], + ) + self.assertEqual( + {"histograms/tagA": expected_composite_description}, + response["histograms"]["tagDescriptions"], + ) + + def test_tags_unsafe_description(self): + self._write_scalar("<&#run>", "scalars/<&#tag>", "<&#description>") + self._write_histogram("<&#run>", "histograms/<&#tag>", "<&#description>") + self._multiplexer.Reload() + + response = self._plugin._tags_impl(context.RequestContext(), "eid") + + self.assertEqual( + {"scalars/<&#tag>": "

<p>&lt;&amp;#description&gt;</p>

"}, + response["scalars"]["tagDescriptions"], + ) + self.assertEqual( + {"histograms/<&#tag>": "

<p>&lt;&amp;#description&gt;</p>

"}, + response["histograms"]["tagDescriptions"], + ) + + def test_tags_unsafe_conflicting_description(self): + self._write_scalar("<&#run1>", "scalars/<&#tag>", None) + self._write_scalar("<&#run2>", "scalars/<&#tag>", "<&# is hot>") + self._write_scalar("<&#run3>", "scalars/<&#tag>", "<&# is cold>") + self._write_scalar("<&#run4>", "scalars/<&#tag>", "<&# is cold>") + self._write_histogram("<&#run1>", "histograms/<&#tag>", None) + self._write_histogram("<&#run2>", "histograms/<&#tag>", "<&# is hot>") + self._write_histogram("<&#run3>", "histograms/<&#tag>", "<&# is cold>") + self._write_histogram("<&#run4>", "histograms/<&#tag>", "<&# is cold>") + self._multiplexer.Reload() + + response = self._plugin._tags_impl(context.RequestContext(), "eid") + + expected_composite_description = ( + "

<h1>Multiple descriptions</h1>

\n" + "

<h2>For runs: &lt;&amp;#run3&gt;, &lt;&amp;#run4&gt;</h2>

\n" + "

<p>&lt;&amp;# is cold&gt;</p>

\n" + "

<h2>For run: &lt;&amp;#run2&gt;</h2>

\n" + "

<p>&lt;&amp;# is hot&gt;</p>

") + self.assertEqual( + {"scalars/<&#tag>": expected_composite_description}, + response["scalars"]["tagDescriptions"], + ) + self.assertEqual( + {"histograms/<&#tag>": expected_composite_description}, + response["histograms"]["tagDescriptions"], + ) + + def test_time_series_scalar(self): + self._write_scalar_data("run1", "scalars/tagA", [0, 100, -200]) + self._multiplexer.Reload() + + requests = [{"plugin": "scalars", "tag": "scalars/tagA"}] + response = self._plugin._time_series_impl( + context.RequestContext(), "", requests) + clean_response = self._clean_time_series_responses(response) + + self.assertEqual( + [{ + "plugin": "scalars", + "tag": "scalars/tagA", + "runToSeries": { + "run1": [ + { + "wallTime": "", + "step": 0, + "value": 0.0, + }, + { + "wallTime": "", + "step": 1, + "value": 100.0, + }, + { + "wallTime": "", + "step": 2, + "value": -200.0, + }, + ] + }, + }], + clean_response, + ) + + def test_time_series_histogram(self): + self._write_histogram_data("run1", "histograms/tagA", [0, 10]) + self._multiplexer.Reload() + + requests = [ + { + "plugin": "histograms", + "tag": "histograms/tagA", + "run": "run1" + } + ] + response = self._plugin._time_series_impl( + context.RequestContext(), "", requests) + clean_response = self._clean_time_series_responses(response) + + self.assertEqual( + [{ + "plugin": "histograms", + "tag": "histograms/tagA", + "run": "run1", + "runToSeries": { + "run1": [ + { + "wallTime": "", + "step": 0, + "bins": [{"min": -0.5, "max": 0.5, "count": 1.0}], + }, + { + "wallTime": "", + "step": 1, + "bins": [{"min": 9.5, "max": 10.5, "count": 1.0}], + }, + ] + }, + }], + clean_response, + ) + + def test_time_series_unmatching_request(self): + self._write_scalar_data("run1", "scalars/tagA", [0, 100, -200]) + + self._multiplexer.Reload() + + requests = [{"plugin": "scalars", "tag": "nothing-matches"}] + response = self._plugin._time_series_impl( + context.RequestContext(), "", requests) + clean_response = 
self._clean_time_series_responses(response) + + self.assertEqual([{ + "plugin": "scalars", + "runToSeries": {}, + "tag": "nothing-matches" + }], clean_response) + + def test_time_series_multiple_runs(self): + self._write_scalar_data("run1", "scalars/tagA", [0]) + self._write_scalar_data("run2", "scalars/tagA", [1]) + self._write_scalar_data("run2", "scalars/tagB", [2]) + + self._multiplexer.Reload() + + requests = [{"plugin": "scalars", "tag": "scalars/tagA"}] + response = self._plugin._time_series_impl( + context.RequestContext(), "", requests) + clean_response = self._clean_time_series_responses(response) + + self.assertEqual( + [{ + "plugin": "scalars", + "runToSeries": { + "run1": [{ + "step": 0, + "value": 0.0, + "wallTime": "", + },], + "run2": [{ + "step": 0, + "value": 1.0, + "wallTime": "", + },], + }, + "tag": "scalars/tagA", + }], + clean_response, + ) + + def test_time_series_multiple_requests(self): + self._write_scalar_data("run1", "scalars/tagA", [0]) + self._write_scalar_data("run2", "scalars/tagB", [1]) + + self._multiplexer.Reload() + + requests = [ + { + "plugin": "scalars", + "tag": "scalars/tagA" + }, + { + "plugin": "scalars", + "tag": "scalars/tagB" + }, + { + "plugin": "scalars", + "tag": "scalars/tagB" + }, + ] + response = self._plugin._time_series_impl( + context.RequestContext(), "", requests) + clean_response = self._clean_time_series_responses(response) + + self.assertEqual( + [{ + "plugin": "scalars", + "runToSeries": { + "run1": [{ + "step": 0, + "value": 0.0, + "wallTime": "", + },], + }, + "tag": "scalars/tagA", + }, { + "plugin": "scalars", + "runToSeries": { + "run2": [{ + "step": 0, + "value": 1.0, + "wallTime": "", + },], + }, + "tag": "scalars/tagB", + }, { + "plugin": "scalars", + "runToSeries": { + "run2": [{ + "step": 0, + "value": 1.0, + "wallTime": "", + },], + }, + "tag": "scalars/tagB", + }], + clean_response, + ) + + def test_time_series_single_request_specific_run(self): + self._write_scalar_data("run1", 
"scalars/tagA", [0]) + self._write_scalar_data("run2", "scalars/tagA", [1]) + + self._multiplexer.Reload() + + requests = [{"plugin": "scalars", "tag": "scalars/tagA", "run": "run2"}] + response = self._plugin._time_series_impl( + context.RequestContext(), "", requests) + clean_response = self._clean_time_series_responses(response) + + self.assertEqual( + [{ + "plugin": "scalars", + "runToSeries": { + "run2": [{ + "step": 0, + "value": 1.0, + "wallTime": "", + },], + }, + "tag": "scalars/tagA", + "run": "run2", + }], + clean_response, + ) + + def test_image_data(self): + self._write_image("run1", "images/tagA", 1, None) + self._multiplexer.Reload() + + # Get the blob_key manually. + image_id = self._get_image_blob_key("run1", "images/tagA", step=0, sample=0) + (data, content_type) = self._plugin._image_data_impl( + context.RequestContext(), image_id) + + self.assertIsInstance(data, bytes) + self.assertEqual(content_type, 'image/png') + self.assertGreater(len(data), 0) + + def test_time_series_bad_arguments(self): + requests = [ + { + "plugin": "images" + }, + { + "plugin": "unknown_plugin", + "tag": "tagA" + }, + ] + response = self._plugin._time_series_impl( + context.RequestContext(), "expid", requests) + errors = [series_response.get("error", "") for series_response in response] + + self.assertEqual(errors, ["Missing tag", "Invalid plugin"]) + + def test_image_data_from_time_series_query(self): + self._write_image("run1", "images/tagA", samples=3) + self._multiplexer.Reload() + + requests = [ + { + "plugin": "images", + "tag": "images/tagA", + "run": "run1", + "sample": 2 + } + ] + original_response = self._plugin._time_series_impl( + context.RequestContext(), "expid", requests) + response = self._plugin._time_series_impl( + context.RequestContext(), "expid", requests) + clean_response = self._clean_time_series_responses(response) + + self.assertEqual([{ + "plugin": "images", + "tag": "images/tagA", + "run": "run1", + "sample": 2, + "runToSeries": { + "run1": [{ 
+ "wallTime": "", + "step": 0, + "imageId": "" + }] + } + }], clean_response) + + image_id = original_response[0]["runToSeries"]["run1"][0]["imageId"] + (data, content_type) = self._plugin._image_data_impl( + context.RequestContext(), image_id) + + self.assertIsInstance(data, bytes) + self.assertGreater(len(data), 0) + + def test_image_bad_request(self): + self._write_image("run1", "images/tagA", 1, None) + self._multiplexer.Reload() + + invalid_sample = 999 + requests = [ + { + "plugin": "images", + "tag": "images/tagA", + "sample": invalid_sample, + "run": "run1" + }, + { + "plugin": "images", + "tag": "images/tagA", + "run": "run1" + }, + { + "plugin": "images", + "tag": "images/tagA", + }, + ] + response = self._plugin._time_series_impl( + context.RequestContext(), "expid", requests) + errors = [series_response.get("error", "") for series_response in response] + + self.assertEqual(errors, ["", "Missing sample", "Missing run"]) + + +if __name__ == "__main__": + tf.test.main() From a1b55a366251009f1b3a5b4d3575a2546b1e000f Mon Sep 17 00:00:00 2001 From: E Date: Tue, 15 Sep 2020 14:08:41 -0700 Subject: [PATCH 2/4] remove unused imports --- tensorboard/plugins/metrics/metrics_plugin_test.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/tensorboard/plugins/metrics/metrics_plugin_test.py b/tensorboard/plugins/metrics/metrics_plugin_test.py index 22ae6c3011..9b095577d7 100644 --- a/tensorboard/plugins/metrics/metrics_plugin_test.py +++ b/tensorboard/plugins/metrics/metrics_plugin_test.py @@ -20,9 +20,7 @@ import argparse import collections -import functools import os.path -import unittest import tensorflow.compat.v1 as tf1 import tensorflow.compat.v2 as tf @@ -30,8 +28,6 @@ from tensorboard import context from tensorboard.backend.event_processing import ( data_provider,) -from tensorboard.backend.event_processing import ( - plugin_event_accumulator as event_accumulator,) from tensorboard.backend.event_processing import ( plugin_event_multiplexer as 
event_multiplexer,) from tensorboard.data import provider @@ -40,7 +36,6 @@ metadata as image_metadata,) from tensorboard.plugins.metrics import ( metrics_plugin,) -from tensorboard.util import test_util tf1.enable_eager_execution() From 31c51c7d173a73ac5dcd16b7fcadb38e82f981da Mon Sep 17 00:00:00 2001 From: E Date: Tue, 15 Sep 2020 14:09:57 -0700 Subject: [PATCH 3/4] run black formatter --- tensorboard/plugins/metrics/metrics_plugin.py | 650 ++++----- .../plugins/metrics/metrics_plugin_test.py | 1262 +++++++++-------- 2 files changed, 973 insertions(+), 939 deletions(-) diff --git a/tensorboard/plugins/metrics/metrics_plugin.py b/tensorboard/plugins/metrics/metrics_plugin.py index f952f9f4d4..5cce912a5a 100644 --- a/tensorboard/plugins/metrics/metrics_plugin.py +++ b/tensorboard/plugins/metrics/metrics_plugin.py @@ -29,13 +29,10 @@ from tensorboard.backend import http_util from tensorboard.data import provider from tensorboard.plugins import base_plugin -from tensorboard.plugins.histogram import ( - metadata as histogram_metadata,) -from tensorboard.plugins.image import ( - metadata as image_metadata,) +from tensorboard.plugins.histogram import metadata as histogram_metadata +from tensorboard.plugins.image import metadata as image_metadata from tensorboard.plugins.metrics import metadata -from tensorboard.plugins.scalar import ( - metadata as scalar_metadata,) +from tensorboard.plugins.scalar import metadata as scalar_metadata _IMGHDR_TO_MIMETYPE = { @@ -48,15 +45,15 @@ _DEFAULT_IMAGE_MIMETYPE = "application/octet-stream" -_SINGLE_RUN_PLUGINS = frozenset([ - histogram_metadata.PLUGIN_NAME, image_metadata.PLUGIN_NAME -]) +_SINGLE_RUN_PLUGINS = frozenset( + [histogram_metadata.PLUGIN_NAME, image_metadata.PLUGIN_NAME] +) _SAMPLED_PLUGINS = frozenset([image_metadata.PLUGIN_NAME]) def _get_tag_description_info(mapping): - """Gets maps from tags to descriptions, and descriptions to runs. + """Gets maps from tags to descriptions, and descriptions to runs. 
Args: mapping: a nested map `d` such that `d[run][tag]` is a time series @@ -69,20 +66,20 @@ def _get_tag_description_info(mapping): description_to_runs: A map from description strings to a set of run strings. """ - tag_to_descriptions = collections.defaultdict(set) - description_to_runs = collections.defaultdict(set) - for (run, tag_to_content) in mapping.items(): - for (tag, metadatum) in tag_to_content.items(): - description = metadatum.description - if len(description): - tag_to_descriptions[tag].add(description) - description_to_runs[description].add(run) + tag_to_descriptions = collections.defaultdict(set) + description_to_runs = collections.defaultdict(set) + for (run, tag_to_content) in mapping.items(): + for (tag, metadatum) in tag_to_content.items(): + description = metadatum.description + if len(description): + tag_to_descriptions[tag].add(description) + description_to_runs[description].add(run) - return tag_to_descriptions, description_to_runs + return tag_to_descriptions, description_to_runs def _build_combined_description(descriptions, description_to_runs): - """Creates a single description from a set of descriptions. + """Creates a single description from a set of descriptions. Descriptions may be composites when a single tag has different descriptions across multiple runs. @@ -95,20 +92,20 @@ def _build_combined_description(descriptions, description_to_runs): Returns: The combined description string. 
""" - prefixed_descriptions = [] - for description in descriptions: - runs = sorted(description_to_runs[description]) - run_or_runs = "runs" if len(runs) > 1 else "run" - run_header = "## For " + run_or_runs + ": " + ", ".join(runs) - description_html = run_header + "\n" + description - prefixed_descriptions.append(description_html) + prefixed_descriptions = [] + for description in descriptions: + runs = sorted(description_to_runs[description]) + run_or_runs = "runs" if len(runs) > 1 else "run" + run_header = "## For " + run_or_runs + ": " + ", ".join(runs) + description_html = run_header + "\n" + description + prefixed_descriptions.append(description_html) - header = "# Multiple descriptions\n" - return header + "\n".join(prefixed_descriptions) + header = "# Multiple descriptions\n" + return header + "\n".join(prefixed_descriptions) def _get_tag_to_description(mapping): - """Returns a map of tags to descriptions. + """Returns a map of tags to descriptions. Args: mapping: a nested map `d` such that `d[run][tag]` is a time series @@ -122,22 +119,26 @@ def _get_tag_to_description(mapping): "loss2": "

<p>The lossy details</p>

", } """ - tag_to_descriptions, description_to_runs = _get_tag_description_info(mapping) + tag_to_descriptions, description_to_runs = _get_tag_description_info( + mapping + ) - result = {} - for tag in tag_to_descriptions: - descriptions = sorted(tag_to_descriptions[tag]) - if len(descriptions) == 1: - description = descriptions[0] - else: - description = _build_combined_description(descriptions, description_to_runs) - result[tag] = plugin_util.markdown_to_safe_html(description) + result = {} + for tag in tag_to_descriptions: + descriptions = sorted(tag_to_descriptions[tag]) + if len(descriptions) == 1: + description = descriptions[0] + else: + description = _build_combined_description( + descriptions, description_to_runs + ) + result[tag] = plugin_util.markdown_to_safe_html(description) - return result + return result def _get_run_tag_info(mapping): - """Returns a map of run names to a list of tag names. + """Returns a map of run names to a list of tag names. Args: mapping: a nested map `d` such that `d[run][tag]` is a time series @@ -147,11 +148,11 @@ def _get_run_tag_info(mapping): A map from run strings to a list of tag strings. E.g. {"loss001a": ["actor/loss", "critic/loss"], ...} """ - return {run: sorted(mapping[run]) for run in mapping} + return {run: sorted(mapping[run]) for run in mapping} def _format_basic_mapping(mapping): - """Prepares a scalar or histogram mapping for client consumption. + """Prepares a scalar or histogram mapping for client consumption. 
Args: mapping: a nested map `d` such that `d[run][tag]` is a time series @@ -162,14 +163,14 @@ def _format_basic_mapping(mapping): runTagInfo: the return type of `_get_run_tag_info` tagDescriptions: the return type of `_get_tag_to_description` """ - return { - "runTagInfo": _get_run_tag_info(mapping), - "tagDescriptions": _get_tag_to_description(mapping), - } + return { + "runTagInfo": _get_run_tag_info(mapping), + "tagDescriptions": _get_tag_to_description(mapping), + } def _format_image_blob_sequence_datum(sorted_datum_list, sample): - """Formats image metadata from a list of BlobSequenceDatum's for clients. + """Formats image metadata from a list of BlobSequenceDatum's for clients. This expects that frontend clients need to access images based on the run+tag+sample. @@ -182,24 +183,26 @@ def _format_image_blob_sequence_datum(sorted_datum_list, sample): Returns: A list of `ImageStepDatum` (see http_api.md). """ - # For images, ignore the first 2 items of a BlobSequenceDatum's values, which - # correspond to width, height. - index = sample + 2 - step_data = [] - for datum in sorted_datum_list: - if len(datum.values) <= index: - continue - - step_data.append({ - "step": datum.step, - "wallTime": datum.wall_time, - "imageId": datum.values[index].blob_key, - }) - return step_data + # For images, ignore the first 2 items of a BlobSequenceDatum's values, which + # correspond to width, height. + index = sample + 2 + step_data = [] + for datum in sorted_datum_list: + if len(datum.values) <= index: + continue + + step_data.append( + { + "step": datum.step, + "wallTime": datum.wall_time, + "imageId": datum.values[index].blob_key, + } + ) + return step_data def _get_tag_run_image_info(mapping): - """Returns a map of tag names to run information. + """Returns a map of tag names to run information. Args: mapping: the result of DataProvider's `list_blob_sequences`. 
@@ -215,17 +218,17 @@ def _get_tag_run_image_info(mapping): "convolved": {"test": {"maxSamplesPerStep": 50}}, } """ - tag_run_image_info = collections.defaultdict(dict) - for (run, tag_to_content) in mapping.items(): - for (tag, metadatum) in tag_to_content.items(): - tag_run_image_info[tag][run] = { - "maxSamplesPerStep": metadatum.max_length - 2 # width, height - } - return dict(tag_run_image_info) + tag_run_image_info = collections.defaultdict(dict) + for (run, tag_to_content) in mapping.items(): + for (tag, metadatum) in tag_to_content.items(): + tag_run_image_info[tag][run] = { + "maxSamplesPerStep": metadatum.max_length - 2 # width, height + } + return dict(tag_run_image_info) def _format_image_mapping(mapping): - """Prepares an image mapping for client consumption. + """Prepares an image mapping for client consumption. Args: mapping: the result of DataProvider's `list_blob_sequences`. @@ -235,66 +238,65 @@ def _format_image_mapping(mapping): tagRunSampledInfo: the return type of `_get_tag_run_image_info` tagDescriptions: the return type of `_get_tag_description_info` """ - return { - "tagDescriptions": _get_tag_to_description(mapping), - "tagRunSampledInfo": _get_tag_run_image_info(mapping), - } + return { + "tagDescriptions": _get_tag_to_description(mapping), + "tagRunSampledInfo": _get_tag_run_image_info(mapping), + } class MetricsPlugin(base_plugin.TBPlugin): - """Metrics Plugin for TensorBoard.""" + """Metrics Plugin for TensorBoard.""" - plugin_name = metadata.PLUGIN_NAME + plugin_name = metadata.PLUGIN_NAME - def __init__(self, context): - """Instantiates MetricsPlugin. + def __init__(self, context): + """Instantiates MetricsPlugin. Args: context: A base_plugin.TBContext instance. MetricsLoader checks that it contains a valid `data_provider`. 
""" - self._data_provider = context.data_provider - - # For histograms, use a round number + 1 since sampling includes both start - # and end steps, so N+1 samples corresponds to dividing the step sequence - # into N intervals. - sampling_hints = context.sampling_hints or {} - self._plugin_downsampling = { - "scalars": - sampling_hints.get(scalar_metadata.PLUGIN_NAME, 1000), - "histograms": - sampling_hints.get(histogram_metadata.PLUGIN_NAME, 51), - "images": - sampling_hints.get(image_metadata.PLUGIN_NAME, 10), - } + self._data_provider = context.data_provider + + # For histograms, use a round number + 1 since sampling includes both start + # and end steps, so N+1 samples corresponds to dividing the step sequence + # into N intervals. + sampling_hints = context.sampling_hints or {} + self._plugin_downsampling = { + "scalars": sampling_hints.get(scalar_metadata.PLUGIN_NAME, 1000), + "histograms": sampling_hints.get( + histogram_metadata.PLUGIN_NAME, 51 + ), + "images": sampling_hints.get(image_metadata.PLUGIN_NAME, 10), + } - def frontend_metadata(self): - return base_plugin.FrontendMetadata( - is_ng_component=True, tab_name="Time Series" - ) + def frontend_metadata(self): + return base_plugin.FrontendMetadata( + is_ng_component=True, tab_name="Time Series" + ) - def get_plugin_apps(self): - return { - "/tags": self._serve_tags, - "/timeSeries": self._serve_time_series, - "/imageData": self._serve_image_data, - } + def get_plugin_apps(self): + return { + "/tags": self._serve_tags, + "/timeSeries": self._serve_time_series, + "/imageData": self._serve_image_data, + } - def data_plugin_names(self): - return (scalar_metadata.PLUGIN_NAME, histogram_metadata.PLUGIN_NAME) + def data_plugin_names(self): + return (scalar_metadata.PLUGIN_NAME, histogram_metadata.PLUGIN_NAME) - def is_active(self): - return False # 'data_plugin_names' suffices. + def is_active(self): + return False # 'data_plugin_names' suffices. 
- @wrappers.Request.application - def _serve_tags(self, request): - ctx = plugin_util.context(request.environ) - experiment = plugin_util.experiment_id(request.environ) - index = self._tags_impl(ctx, experiment=experiment) - return http_util.Respond(request, index, "application/json") + @wrappers.Request.application + def _serve_tags(self, request): + ctx = plugin_util.context(request.environ) + experiment = plugin_util.experiment_id(request.environ) + index = self._tags_impl(ctx, experiment=experiment) + return http_util.Respond(request, index, "application/json") - def _tags_impl(self, ctx, experiment=None): - """Returns tag metadata for a given experiment's logged metrics. + def _tags_impl(self, ctx, experiment=None): + """Returns tag metadata for a given experiment's logged metrics. Args: ctx: A `tensorboard.context.RequestContext` value. @@ -304,45 +306,47 @@ def _tags_impl(self, ctx, experiment=None): A nested dict 'd' with keys in ("scalars", "histograms", "images") and values being the return type of _format_*mapping. 
""" - scalar_mapping = self._data_provider.list_scalars( - ctx, - experiment_id=experiment, - plugin_name=scalar_metadata.PLUGIN_NAME, - ) - histogram_mapping = self._data_provider.list_tensors( - ctx, - experiment_id=experiment, - plugin_name=histogram_metadata.PLUGIN_NAME, - ) - image_mapping = self._data_provider.list_blob_sequences( - ctx, - experiment_id=experiment, - plugin_name=image_metadata.PLUGIN_NAME, - ) - - result = {} - result["scalars"] = _format_basic_mapping(scalar_mapping) - result["histograms"] = _format_basic_mapping(histogram_mapping) - result["images"] = _format_image_mapping(image_mapping) - return result - - @wrappers.Request.application - def _serve_time_series(self, request): - ctx = plugin_util.context(request.environ) - experiment = plugin_util.experiment_id(request.environ) - series_requests_string = request.form.get("requests") - if not series_requests_string: - raise errors.InvalidArgumentError("Missing 'requests' field") - try: - series_requests = json.loads(series_requests_string) - except ValueError: - raise errors.InvalidArgumentError("Unable to parse 'requests' as JSON") - - response = self._time_series_impl(ctx, experiment, series_requests) - return http_util.Respond(request, response, "application/json") - - def _time_series_impl(self, ctx, experiment, series_requests): - """Constructs a list of responses from a list of series requests. 
+ scalar_mapping = self._data_provider.list_scalars( + ctx, + experiment_id=experiment, + plugin_name=scalar_metadata.PLUGIN_NAME, + ) + histogram_mapping = self._data_provider.list_tensors( + ctx, + experiment_id=experiment, + plugin_name=histogram_metadata.PLUGIN_NAME, + ) + image_mapping = self._data_provider.list_blob_sequences( + ctx, + experiment_id=experiment, + plugin_name=image_metadata.PLUGIN_NAME, + ) + + result = {} + result["scalars"] = _format_basic_mapping(scalar_mapping) + result["histograms"] = _format_basic_mapping(histogram_mapping) + result["images"] = _format_image_mapping(image_mapping) + return result + + @wrappers.Request.application + def _serve_time_series(self, request): + ctx = plugin_util.context(request.environ) + experiment = plugin_util.experiment_id(request.environ) + series_requests_string = request.form.get("requests") + if not series_requests_string: + raise errors.InvalidArgumentError("Missing 'requests' field") + try: + series_requests = json.loads(series_requests_string) + except ValueError: + raise errors.InvalidArgumentError( + "Unable to parse 'requests' as JSON" + ) + + response = self._time_series_impl(ctx, experiment, series_requests) + return http_util.Respond(request, response, "application/json") + + def _time_series_impl(self, ctx, experiment, series_requests): + """Constructs a list of responses from a list of series requests. Args: ctx: A `tensorboard.context.RequestContext` value. @@ -352,49 +356,51 @@ def _time_series_impl(self, ctx, experiment, series_requests): Returns: A list of `TimeSeriesResponse` dicts (see http_api.md). 
""" - responses = [ - self._get_time_series(ctx, experiment, request) - for request in series_requests - ] - return responses - - def _create_base_response(self, series_request): - tag = series_request.get("tag") - run = series_request.get("run") - plugin = series_request.get("plugin") - sample = series_request.get("sample") - response = {"plugin": plugin, "tag": tag} - if isinstance(run, str): - response["run"] = run - if isinstance(sample, int): - response["sample"] = sample - - return response - - def _get_invalid_request_error(self, series_request): - tag = series_request.get("tag") - plugin = series_request.get("plugin") - run = series_request.get("run") - sample = series_request.get("sample") - - if not isinstance(tag, str): - return "Missing tag" - - if (plugin != scalar_metadata.PLUGIN_NAME and - plugin != histogram_metadata.PLUGIN_NAME and - plugin != image_metadata.PLUGIN_NAME): - return "Invalid plugin" - - if plugin in _SINGLE_RUN_PLUGINS and not isinstance(run, str): - return "Missing run" - - if plugin in _SAMPLED_PLUGINS and not isinstance(sample, int): - return "Missing sample" - - return None - - def _get_time_series(self, ctx, experiment, series_request): - """Returns time series data for a given tag, plugin. 
+ responses = [ + self._get_time_series(ctx, experiment, request) + for request in series_requests + ] + return responses + + def _create_base_response(self, series_request): + tag = series_request.get("tag") + run = series_request.get("run") + plugin = series_request.get("plugin") + sample = series_request.get("sample") + response = {"plugin": plugin, "tag": tag} + if isinstance(run, str): + response["run"] = run + if isinstance(sample, int): + response["sample"] = sample + + return response + + def _get_invalid_request_error(self, series_request): + tag = series_request.get("tag") + plugin = series_request.get("plugin") + run = series_request.get("run") + sample = series_request.get("sample") + + if not isinstance(tag, str): + return "Missing tag" + + if ( + plugin != scalar_metadata.PLUGIN_NAME + and plugin != histogram_metadata.PLUGIN_NAME + and plugin != image_metadata.PLUGIN_NAME + ): + return "Invalid plugin" + + if plugin in _SINGLE_RUN_PLUGINS and not isinstance(run, str): + return "Missing run" + + if plugin in _SAMPLED_PLUGINS and not isinstance(sample, int): + return "Missing sample" + + return None + + def _get_time_series(self, ctx, experiment, series_request): + """Returns time series data for a given tag, plugin. Args: ctx: A `tensorboard.context.RequestContext` value. @@ -404,34 +410,38 @@ def _get_time_series(self, ctx, experiment, series_request): Returns: A `TimeSeriesResponse` dict (see http_api.md). 
""" - tag = series_request.get("tag") - run = series_request.get("run") - plugin = series_request.get("plugin") - sample = series_request.get("sample") - response = self._create_base_response(series_request) - request_error = self._get_invalid_request_error(series_request) - if request_error: - response["error"] = request_error - return response - - runs = [run] if run else None - run_to_series = None - if plugin == scalar_metadata.PLUGIN_NAME: - run_to_series = self._get_run_to_scalar_series(ctx, experiment, tag, runs) - - if plugin == histogram_metadata.PLUGIN_NAME: - run_to_series = self._get_run_to_histogram_series( - ctx, experiment, tag, runs) - - if plugin == image_metadata.PLUGIN_NAME: - run_to_series = self._get_run_to_image_series( - ctx, experiment, tag, sample, runs) - - response["runToSeries"] = run_to_series - return response - - def _get_run_to_scalar_series(self, ctx, experiment, tag, runs): - """Builds a run-to-scalar-series dict for client consumption. + tag = series_request.get("tag") + run = series_request.get("run") + plugin = series_request.get("plugin") + sample = series_request.get("sample") + response = self._create_base_response(series_request) + request_error = self._get_invalid_request_error(series_request) + if request_error: + response["error"] = request_error + return response + + runs = [run] if run else None + run_to_series = None + if plugin == scalar_metadata.PLUGIN_NAME: + run_to_series = self._get_run_to_scalar_series( + ctx, experiment, tag, runs + ) + + if plugin == histogram_metadata.PLUGIN_NAME: + run_to_series = self._get_run_to_histogram_series( + ctx, experiment, tag, runs + ) + + if plugin == image_metadata.PLUGIN_NAME: + run_to_series = self._get_run_to_image_series( + ctx, experiment, tag, sample, runs + ) + + response["runToSeries"] = run_to_series + return response + + def _get_run_to_scalar_series(self, ctx, experiment, tag, runs): + """Builds a run-to-scalar-series dict for client consumption. 
Args: ctx: A `tensorboard.context.RequestContext` value. @@ -442,30 +452,32 @@ def _get_run_to_scalar_series(self, ctx, experiment, tag, runs): Returns: A map from string run names to `ScalarStepDatum` (see http_api.md). """ - mapping = self._data_provider.read_scalars( - ctx, - experiment_id=experiment, - plugin_name=scalar_metadata.PLUGIN_NAME, - downsample=self._plugin_downsampling["scalars"], - run_tag_filter=provider.RunTagFilter(runs=runs, tags=[tag]), - ) - - run_to_series = {} - for (result_run, tag_data) in mapping.items(): - if tag not in tag_data: - continue - values = [{ - "wallTime": datum.wall_time, - "step": datum.step, - "value": datum.value - } for datum in tag_data[tag]] - run_to_series[result_run] = values - - return run_to_series - - - def _format_histogram_datum_bins(self, datum): - """Formats a histogram datum's bins for client consumption. + mapping = self._data_provider.read_scalars( + ctx, + experiment_id=experiment, + plugin_name=scalar_metadata.PLUGIN_NAME, + downsample=self._plugin_downsampling["scalars"], + run_tag_filter=provider.RunTagFilter(runs=runs, tags=[tag]), + ) + + run_to_series = {} + for (result_run, tag_data) in mapping.items(): + if tag not in tag_data: + continue + values = [ + { + "wallTime": datum.wall_time, + "step": datum.step, + "value": datum.value, + } + for datum in tag_data[tag] + ] + run_to_series[result_run] = values + + return run_to_series + + def _format_histogram_datum_bins(self, datum): + """Formats a histogram datum's bins for client consumption. Args: datum: a DataProvider's TensorDatum. @@ -473,13 +485,12 @@ def _format_histogram_datum_bins(self, datum): Returns: A list of `HistogramBin`s (see http_api.md). 
""" - numpy_list = datum.numpy.tolist() - bins = [{"min": x[0], "max": x[1], "count": x[2]} for x in numpy_list] - return bins - + numpy_list = datum.numpy.tolist() + bins = [{"min": x[0], "max": x[1], "count": x[2]} for x in numpy_list] + return bins - def _get_run_to_histogram_series(self, ctx, experiment, tag, runs): - """Builds a run-to-histogram-series dict for client consumption. + def _get_run_to_histogram_series(self, ctx, experiment, tag, runs): + """Builds a run-to-histogram-series dict for client consumption. Args: ctx: A `tensorboard.context.RequestContext` value. @@ -490,30 +501,32 @@ def _get_run_to_histogram_series(self, ctx, experiment, tag, runs): Returns: A map from string run names to `HistogramStepDatum` (see http_api.md). """ - mapping = self._data_provider.read_tensors( - ctx, - experiment_id=experiment, - plugin_name=histogram_metadata.PLUGIN_NAME, - downsample=self._plugin_downsampling["histograms"], - run_tag_filter=provider.RunTagFilter(runs=runs, tags=[tag]), - ) - - run_to_series = {} - for (result_run, tag_data) in mapping.items(): - if tag not in tag_data: - continue - values = [{ - "wallTime": datum.wall_time, - "step": datum.step, - "bins": self._format_histogram_datum_bins(datum) - } for datum in tag_data[tag]] - run_to_series[result_run] = values - - return run_to_series - - - def _get_run_to_image_series(self, ctx, experiment, tag, sample, runs): - """Builds a run-to-image-series dict for client consumption. 
+ mapping = self._data_provider.read_tensors( + ctx, + experiment_id=experiment, + plugin_name=histogram_metadata.PLUGIN_NAME, + downsample=self._plugin_downsampling["histograms"], + run_tag_filter=provider.RunTagFilter(runs=runs, tags=[tag]), + ) + + run_to_series = {} + for (result_run, tag_data) in mapping.items(): + if tag not in tag_data: + continue + values = [ + { + "wallTime": datum.wall_time, + "step": datum.step, + "bins": self._format_histogram_datum_bins(datum), + } + for datum in tag_data[tag] + ] + run_to_series[result_run] = values + + return run_to_series + + def _get_run_to_image_series(self, ctx, experiment, tag, sample, runs): + """Builds a run-to-image-series dict for client consumption. Args: ctx: A `tensorboard.context.RequestContext` value. @@ -525,39 +538,40 @@ def _get_run_to_image_series(self, ctx, experiment, tag, sample, runs): Returns: A `RunToSeries` dict (see http_api.md). """ - mapping = self._data_provider.read_blob_sequences( - ctx, - experiment_id=experiment, - plugin_name=image_metadata.PLUGIN_NAME, - downsample=self._plugin_downsampling["images"], - run_tag_filter=provider.RunTagFilter(runs, tags=[tag]), - ) - - run_to_series = {} - for (result_run, tag_data) in mapping.items(): - if tag not in tag_data: - continue - blob_sequence_datum_list = tag_data[tag] - series = _format_image_blob_sequence_datum( - blob_sequence_datum_list, sample) - if series: - run_to_series[result_run] = series - - return run_to_series - - @wrappers.Request.application - def _serve_image_data(self, request): - """Serves an individual image.""" - ctx = plugin_util.context(request.environ) - blob_key = request.args["imageId"] - if not blob_key: - raise errors.InvalidArgumentError("Missing 'imageId' field") - - (data, content_type) = self._image_data_impl(ctx, blob_key) - return http_util.Respond(request, data, content_type) - - def _image_data_impl(self, ctx, blob_key): - """Gets the image data for a blob key. 
+ mapping = self._data_provider.read_blob_sequences( + ctx, + experiment_id=experiment, + plugin_name=image_metadata.PLUGIN_NAME, + downsample=self._plugin_downsampling["images"], + run_tag_filter=provider.RunTagFilter(runs, tags=[tag]), + ) + + run_to_series = {} + for (result_run, tag_data) in mapping.items(): + if tag not in tag_data: + continue + blob_sequence_datum_list = tag_data[tag] + series = _format_image_blob_sequence_datum( + blob_sequence_datum_list, sample + ) + if series: + run_to_series[result_run] = series + + return run_to_series + + @wrappers.Request.application + def _serve_image_data(self, request): + """Serves an individual image.""" + ctx = plugin_util.context(request.environ) + blob_key = request.args["imageId"] + if not blob_key: + raise errors.InvalidArgumentError("Missing 'imageId' field") + + (data, content_type) = self._image_data_impl(ctx, blob_key) + return http_util.Respond(request, data, content_type) + + def _image_data_impl(self, ctx, blob_key): + """Gets the image data for a blob key. Args: ctx: A `tensorboard.context.RequestContext` value. @@ -568,9 +582,9 @@ def _image_data_impl(self, ctx, blob_key): data: a raw bytestring of the requested image's contents. content_type: a string HTTP content type. 
""" - data = self._data_provider.read_blob(ctx, blob_key=blob_key) - image_type = imghdr.what(None, data) - content_type = _IMGHDR_TO_MIMETYPE.get( - image_type, _DEFAULT_IMAGE_MIMETYPE - ) - return (data, content_type) + data = self._data_provider.read_blob(ctx, blob_key=blob_key) + image_type = imghdr.what(None, data) + content_type = _IMGHDR_TO_MIMETYPE.get( + image_type, _DEFAULT_IMAGE_MIMETYPE + ) + return (data, content_type) diff --git a/tensorboard/plugins/metrics/metrics_plugin_test.py b/tensorboard/plugins/metrics/metrics_plugin_test.py index 9b095577d7..dde94c6ecd 100644 --- a/tensorboard/plugins/metrics/metrics_plugin_test.py +++ b/tensorboard/plugins/metrics/metrics_plugin_test.py @@ -26,665 +26,685 @@ import tensorflow.compat.v2 as tf from tensorboard import context +from tensorboard.backend.event_processing import data_provider from tensorboard.backend.event_processing import ( - data_provider,) -from tensorboard.backend.event_processing import ( - plugin_event_multiplexer as event_multiplexer,) + plugin_event_multiplexer as event_multiplexer, +) from tensorboard.data import provider from tensorboard.plugins import base_plugin -from tensorboard.plugins.image import ( - metadata as image_metadata,) -from tensorboard.plugins.metrics import ( - metrics_plugin,) +from tensorboard.plugins.image import metadata as image_metadata +from tensorboard.plugins.metrics import metrics_plugin tf1.enable_eager_execution() class MetricsPluginTest(tf.test.TestCase): - - def setUp(self): - super(MetricsPluginTest, self).setUp() - self._logdir = self.get_temp_dir() - self._multiplexer = event_multiplexer.EventMultiplexer() - - flags = argparse.Namespace(generic_data="true") - provider = data_provider.MultiplexerDataProvider(self._multiplexer, - self._logdir) - ctx = base_plugin.TBContext( - flags=flags, - logdir=self._logdir, - multiplexer=self._multiplexer, - data_provider=provider, - ) - self._plugin = metrics_plugin.MetricsPlugin(ctx) - - ### Writing utilities. 
- - def _write_scalar(self, run, tag, description=None): - subdir = os.path.join(self._logdir, run) - writer = tf.summary.create_file_writer(subdir) - - with writer.as_default(): - tf.summary.scalar(tag, 42, step=0, description=description) - writer.flush() - self._multiplexer.AddRunsFromDirectory(self._logdir) - - def _write_scalar_data(self, run, tag, data=[]): - """Writes scalar data, starting at step 0. + def setUp(self): + super(MetricsPluginTest, self).setUp() + self._logdir = self.get_temp_dir() + self._multiplexer = event_multiplexer.EventMultiplexer() + + flags = argparse.Namespace(generic_data="true") + provider = data_provider.MultiplexerDataProvider( + self._multiplexer, self._logdir + ) + ctx = base_plugin.TBContext( + flags=flags, + logdir=self._logdir, + multiplexer=self._multiplexer, + data_provider=provider, + ) + self._plugin = metrics_plugin.MetricsPlugin(ctx) + + ### Writing utilities. + + def _write_scalar(self, run, tag, description=None): + subdir = os.path.join(self._logdir, run) + writer = tf.summary.create_file_writer(subdir) + + with writer.as_default(): + tf.summary.scalar(tag, 42, step=0, description=description) + writer.flush() + self._multiplexer.AddRunsFromDirectory(self._logdir) + + def _write_scalar_data(self, run, tag, data=[]): + """Writes scalar data, starting at step 0. Args: run: string run name. tag: string tag name. data: list of scalar values to write at each step. 
""" - subdir = os.path.join(self._logdir, run) - writer = tf.summary.create_file_writer(subdir) - - with writer.as_default(): - step = 0 - for datum in data: - tf.summary.scalar(tag, datum, step=step) - step += 1 - writer.flush() - self._multiplexer.AddRunsFromDirectory(self._logdir) - - def _write_histogram(self, run, tag, description=None): - subdir = os.path.join(self._logdir, run) - writer = tf.summary.create_file_writer(subdir) - - with writer.as_default(): - data = tf.random.normal(shape=[3]) - tf.summary.histogram(tag, data, step=0, description=description) - writer.flush() - self._multiplexer.AddRunsFromDirectory(self._logdir) - - def _write_histogram_data(self, run, tag, data=[]): - """Writes histogram data, starting at step 0. + subdir = os.path.join(self._logdir, run) + writer = tf.summary.create_file_writer(subdir) + + with writer.as_default(): + step = 0 + for datum in data: + tf.summary.scalar(tag, datum, step=step) + step += 1 + writer.flush() + self._multiplexer.AddRunsFromDirectory(self._logdir) + + def _write_histogram(self, run, tag, description=None): + subdir = os.path.join(self._logdir, run) + writer = tf.summary.create_file_writer(subdir) + + with writer.as_default(): + data = tf.random.normal(shape=[3]) + tf.summary.histogram(tag, data, step=0, description=description) + writer.flush() + self._multiplexer.AddRunsFromDirectory(self._logdir) + + def _write_histogram_data(self, run, tag, data=[]): + """Writes histogram data, starting at step 0. Args: run: string run name. tag: string tag name. data: list of histogram values to write at each step. 
""" - subdir = os.path.join(self._logdir, run) - writer = tf.summary.create_file_writer(subdir) - - with writer.as_default(): - step = 0 - for datum in data: - tf.summary.histogram(tag, datum, step=step) - step += 1 - writer.flush() - self._multiplexer.AddRunsFromDirectory(self._logdir) - - def _write_image(self, run, tag, samples=2, description=None): - subdir = os.path.join(self._logdir, run) - writer = tf.summary.create_file_writer(subdir) - - with writer.as_default(): - data = tf.random.normal(shape=[samples, 8, 8, 1]) - tf.summary.image( - tag, data, step=0, max_outputs=samples, description=description) - writer.flush() - self._multiplexer.AddRunsFromDirectory(self._logdir) - - ### Misc utilities. - - def _clean_time_series_responses(self, responses): - """Cleans non-deterministic data from a TimeSeriesResponse, in place.""" - for response in responses: - run_to_series = response.get("runToSeries", {}) - for (run, series) in run_to_series.items(): - for datum in series: - if "wallTime" in datum: - datum["wallTime"] = "" - - # Clean images. - run_to_image_series = response.get("runToSeries", {}) - for (run, series) in run_to_image_series.items(): - for datum in series: - if "wallTime" in datum: - datum["wallTime"] = "" - if "imageId" in datum: - datum["imageId"] = "" - - return responses - - def _get_image_blob_key(self, run, tag, step=0, sample=0): - """Returns a single image's blob_key after it has been written.""" - mapping = self._plugin._data_provider.read_blob_sequences( - context.RequestContext(), - experiment_id="expid", - plugin_name=image_metadata.PLUGIN_NAME, - downsample=10, - run_tag_filter=provider.RunTagFilter(tags=[tag]), - ) - blob_sequence_datum = mapping[run][tag][step] - # For images, the first 2 datum values are ignored. - return blob_sequence_datum.values[2 + sample].blob_key - - - ### Actual tests. 
- - def test_routes_provided(self): - """Tests that the plugin offers the correct routes.""" - routes = self._plugin.get_plugin_apps() - self.assertIsInstance(routes["/tags"], collections.Callable) - - def test_tags_empty(self): - response = self._plugin._tags_impl(context.RequestContext(), "eid") - - expected_tags = { - "runTagInfo": {}, - "tagDescriptions": {}, - } - self.assertEqual(expected_tags, response["scalars"]) - self.assertEqual(expected_tags, response["histograms"]) - self.assertEqual({ - "tagDescriptions": {}, - "tagRunSampledInfo": {}, - }, response["images"]) - - def test_tags(self): - self._write_scalar("run1", "scalars/tagA", None) - self._write_scalar("run1", "scalars/tagA", None) - self._write_scalar("run1", "scalars/tagB", None) - self._write_scalar("run2", "scalars/tagB", None) - self._write_histogram("run1", "histograms/tagA", None) - self._write_histogram("run1", "histograms/tagA", None) - self._write_histogram("run1", "histograms/tagB", None) - self._write_histogram("run2", "histograms/tagB", None) - self._write_image("run1", "images/tagA", 1, None) - self._write_image("run1", "images/tagA", 2, None) - self._write_image("run1", "images/tagB", 3, None) - self._write_image("run2", "images/tagB", 4, None) - - self._multiplexer.Reload() - - response = self._plugin._tags_impl(context.RequestContext(), "eid") - - self.assertEqual( - { - "runTagInfo": { - "run1": ["scalars/tagA", "scalars/tagB"], - "run2": ["scalars/tagB"], - }, + subdir = os.path.join(self._logdir, run) + writer = tf.summary.create_file_writer(subdir) + + with writer.as_default(): + step = 0 + for datum in data: + tf.summary.histogram(tag, datum, step=step) + step += 1 + writer.flush() + self._multiplexer.AddRunsFromDirectory(self._logdir) + + def _write_image(self, run, tag, samples=2, description=None): + subdir = os.path.join(self._logdir, run) + writer = tf.summary.create_file_writer(subdir) + + with writer.as_default(): + data = tf.random.normal(shape=[samples, 8, 8, 1]) + 
tf.summary.image( + tag, data, step=0, max_outputs=samples, description=description + ) + writer.flush() + self._multiplexer.AddRunsFromDirectory(self._logdir) + + ### Misc utilities. + + def _clean_time_series_responses(self, responses): + """Cleans non-deterministic data from a TimeSeriesResponse, in place.""" + for response in responses: + run_to_series = response.get("runToSeries", {}) + for (run, series) in run_to_series.items(): + for datum in series: + if "wallTime" in datum: + datum["wallTime"] = "" + + # Clean images. + run_to_image_series = response.get("runToSeries", {}) + for (run, series) in run_to_image_series.items(): + for datum in series: + if "wallTime" in datum: + datum["wallTime"] = "" + if "imageId" in datum: + datum["imageId"] = "" + + return responses + + def _get_image_blob_key(self, run, tag, step=0, sample=0): + """Returns a single image's blob_key after it has been written.""" + mapping = self._plugin._data_provider.read_blob_sequences( + context.RequestContext(), + experiment_id="expid", + plugin_name=image_metadata.PLUGIN_NAME, + downsample=10, + run_tag_filter=provider.RunTagFilter(tags=[tag]), + ) + blob_sequence_datum = mapping[run][tag][step] + # For images, the first 2 datum values are ignored. + return blob_sequence_datum.values[2 + sample].blob_key + + ### Actual tests. 
+ + def test_routes_provided(self): + """Tests that the plugin offers the correct routes.""" + routes = self._plugin.get_plugin_apps() + self.assertIsInstance(routes["/tags"], collections.Callable) + + def test_tags_empty(self): + response = self._plugin._tags_impl(context.RequestContext(), "eid") + + expected_tags = { + "runTagInfo": {}, "tagDescriptions": {}, - }, - response["scalars"], - ) - self.assertEqual( - { - "runTagInfo": { - "run1": ["histograms/tagA", "histograms/tagB"], - "run2": ["histograms/tagB"], + } + self.assertEqual(expected_tags, response["scalars"]) + self.assertEqual(expected_tags, response["histograms"]) + self.assertEqual( + {"tagDescriptions": {}, "tagRunSampledInfo": {},}, + response["images"], + ) + + def test_tags(self): + self._write_scalar("run1", "scalars/tagA", None) + self._write_scalar("run1", "scalars/tagA", None) + self._write_scalar("run1", "scalars/tagB", None) + self._write_scalar("run2", "scalars/tagB", None) + self._write_histogram("run1", "histograms/tagA", None) + self._write_histogram("run1", "histograms/tagA", None) + self._write_histogram("run1", "histograms/tagB", None) + self._write_histogram("run2", "histograms/tagB", None) + self._write_image("run1", "images/tagA", 1, None) + self._write_image("run1", "images/tagA", 2, None) + self._write_image("run1", "images/tagB", 3, None) + self._write_image("run2", "images/tagB", 4, None) + + self._multiplexer.Reload() + + response = self._plugin._tags_impl(context.RequestContext(), "eid") + + self.assertEqual( + { + "runTagInfo": { + "run1": ["scalars/tagA", "scalars/tagB"], + "run2": ["scalars/tagB"], + }, + "tagDescriptions": {}, }, - "tagDescriptions": {}, - }, - response["histograms"], - ) - self.assertEqual( - { - "tagDescriptions": {}, - "tagRunSampledInfo": { - "images/tagA": { - "run1": { - "maxSamplesPerStep": 2 - } + response["scalars"], + ) + self.assertEqual( + { + "runTagInfo": { + "run1": ["histograms/tagA", "histograms/tagB"], + "run2": ["histograms/tagB"], }, 
- "images/tagB": { - "run1": { - "maxSamplesPerStep": 3 - }, - "run2": { - "maxSamplesPerStep": 4 + "tagDescriptions": {}, + }, + response["histograms"], + ) + self.assertEqual( + { + "tagDescriptions": {}, + "tagRunSampledInfo": { + "images/tagA": {"run1": {"maxSamplesPerStep": 2}}, + "images/tagB": { + "run1": {"maxSamplesPerStep": 3}, + "run2": {"maxSamplesPerStep": 4}, }, }, }, - }, - response["images"], - ) - - def test_tags_with_descriptions(self): - self._write_scalar("run1", "scalars/tagA", "Describing tagA") - self._write_scalar("run1", "scalars/tagB", "Describing tagB") - self._write_scalar("run2", "scalars/tagB", "Describing tagB") - self._write_histogram("run1", "histograms/tagA", "Describing tagA") - self._write_histogram("run1", "histograms/tagB", "Describing tagB") - self._write_histogram("run2", "histograms/tagB", "Describing tagB") - self._write_image("run1", "images/tagA", 1, "Describing tagA") - self._write_image("run1", "images/tagB", 2, "Describing tagB") - self._write_image("run2", "images/tagB", 3, "Describing tagB") - self._multiplexer.Reload() - - response = self._plugin._tags_impl(context.RequestContext(), "eid") - - self.assertEqual( - { - "runTagInfo": { - "run1": ["scalars/tagA", "scalars/tagB"], - "run2": ["scalars/tagB"], - }, - "tagDescriptions": { - "scalars/tagA": "

Describing tagA

", - "scalars/tagB": "

Describing tagB

", - }, - }, - response["scalars"], - ) - self.assertEqual( - { - "runTagInfo": { - "run1": ["histograms/tagA", "histograms/tagB"], - "run2": ["histograms/tagB"], - }, - "tagDescriptions": { - "histograms/tagA": "

Describing tagA

", - "histograms/tagB": "

Describing tagB

", + response["images"], + ) + + def test_tags_with_descriptions(self): + self._write_scalar("run1", "scalars/tagA", "Describing tagA") + self._write_scalar("run1", "scalars/tagB", "Describing tagB") + self._write_scalar("run2", "scalars/tagB", "Describing tagB") + self._write_histogram("run1", "histograms/tagA", "Describing tagA") + self._write_histogram("run1", "histograms/tagB", "Describing tagB") + self._write_histogram("run2", "histograms/tagB", "Describing tagB") + self._write_image("run1", "images/tagA", 1, "Describing tagA") + self._write_image("run1", "images/tagB", 2, "Describing tagB") + self._write_image("run2", "images/tagB", 3, "Describing tagB") + self._multiplexer.Reload() + + response = self._plugin._tags_impl(context.RequestContext(), "eid") + + self.assertEqual( + { + "runTagInfo": { + "run1": ["scalars/tagA", "scalars/tagB"], + "run2": ["scalars/tagB"], + }, + "tagDescriptions": { + "scalars/tagA": "

Describing tagA

", + "scalars/tagB": "

Describing tagB

", + }, }, - }, - response["histograms"], - ) - self.assertEqual( - { - "tagDescriptions": { - "images/tagA": "

Describing tagA

", - "images/tagB": "

Describing tagB

", + response["scalars"], + ) + self.assertEqual( + { + "runTagInfo": { + "run1": ["histograms/tagA", "histograms/tagB"], + "run2": ["histograms/tagB"], + }, + "tagDescriptions": { + "histograms/tagA": "

Describing tagA

", + "histograms/tagB": "

Describing tagB

", + }, }, - "tagRunSampledInfo": { - "images/tagA": { - "run1": { - "maxSamplesPerStep": 1 - } + response["histograms"], + ) + self.assertEqual( + { + "tagDescriptions": { + "images/tagA": "

Describing tagA

", + "images/tagB": "

Describing tagB

", }, - "images/tagB": { - "run1": { - "maxSamplesPerStep": 2 - }, - "run2": { - "maxSamplesPerStep": 3 + "tagRunSampledInfo": { + "images/tagA": {"run1": {"maxSamplesPerStep": 1}}, + "images/tagB": { + "run1": {"maxSamplesPerStep": 2}, + "run2": {"maxSamplesPerStep": 3}, }, }, }, - }, - response["images"], - ) - - def test_tags_conflicting_description(self): - self._write_scalar("run1", "scalars/tagA", None) - self._write_scalar("run2", "scalars/tagA", "tagA is hot") - self._write_scalar("run3", "scalars/tagA", "tagA is cold") - self._write_scalar("run4", "scalars/tagA", "tagA is cold") - self._write_histogram("run1", "histograms/tagA", None) - self._write_histogram("run2", "histograms/tagA", "tagA is hot") - self._write_histogram("run3", "histograms/tagA", "tagA is cold") - self._write_histogram("run4", "histograms/tagA", "tagA is cold") - self._multiplexer.Reload() - - response = self._plugin._tags_impl(context.RequestContext(), "eid") - - expected_composite_description = ("

Multiple descriptions

\n" - "

For runs: run3, run4

\n" - "

tagA is cold

\n" - "

For run: run2

\n" - "

tagA is hot

") - self.assertEqual( - {"scalars/tagA": expected_composite_description}, - response["scalars"]["tagDescriptions"], - ) - self.assertEqual( - {"histograms/tagA": expected_composite_description}, - response["histograms"]["tagDescriptions"], - ) - - def test_tags_unsafe_description(self): - self._write_scalar("<&#run>", "scalars/<&#tag>", "<&#description>") - self._write_histogram("<&#run>", "histograms/<&#tag>", "<&#description>") - self._multiplexer.Reload() - - response = self._plugin._tags_impl(context.RequestContext(), "eid") - - self.assertEqual( - {"scalars/<&#tag>": "

<&#description>

"}, - response["scalars"]["tagDescriptions"], - ) - self.assertEqual( - {"histograms/<&#tag>": "

<&#description>

"}, - response["histograms"]["tagDescriptions"], - ) - - def test_tags_unsafe_conflicting_description(self): - self._write_scalar("<&#run1>", "scalars/<&#tag>", None) - self._write_scalar("<&#run2>", "scalars/<&#tag>", "<&# is hot>") - self._write_scalar("<&#run3>", "scalars/<&#tag>", "<&# is cold>") - self._write_scalar("<&#run4>", "scalars/<&#tag>", "<&# is cold>") - self._write_histogram("<&#run1>", "histograms/<&#tag>", None) - self._write_histogram("<&#run2>", "histograms/<&#tag>", "<&# is hot>") - self._write_histogram("<&#run3>", "histograms/<&#tag>", "<&# is cold>") - self._write_histogram("<&#run4>", "histograms/<&#tag>", "<&# is cold>") - self._multiplexer.Reload() - - response = self._plugin._tags_impl(context.RequestContext(), "eid") - - expected_composite_description = ( - "

Multiple descriptions

\n" - "

For runs: <&#run3>, <&#run4>

\n" - "

<&# is cold>

\n" - "

For run: <&#run2>

\n" - "

<&# is hot>

") - self.assertEqual( - {"scalars/<&#tag>": expected_composite_description}, - response["scalars"]["tagDescriptions"], - ) - self.assertEqual( - {"histograms/<&#tag>": expected_composite_description}, - response["histograms"]["tagDescriptions"], - ) - - def test_time_series_scalar(self): - self._write_scalar_data("run1", "scalars/tagA", [0, 100, -200]) - self._multiplexer.Reload() - - requests = [{"plugin": "scalars", "tag": "scalars/tagA"}] - response = self._plugin._time_series_impl( - context.RequestContext(), "", requests) - clean_response = self._clean_time_series_responses(response) - - self.assertEqual( - [{ - "plugin": "scalars", - "tag": "scalars/tagA", - "runToSeries": { - "run1": [ - { - "wallTime": "", - "step": 0, - "value": 0.0, + response["images"], + ) + + def test_tags_conflicting_description(self): + self._write_scalar("run1", "scalars/tagA", None) + self._write_scalar("run2", "scalars/tagA", "tagA is hot") + self._write_scalar("run3", "scalars/tagA", "tagA is cold") + self._write_scalar("run4", "scalars/tagA", "tagA is cold") + self._write_histogram("run1", "histograms/tagA", None) + self._write_histogram("run2", "histograms/tagA", "tagA is hot") + self._write_histogram("run3", "histograms/tagA", "tagA is cold") + self._write_histogram("run4", "histograms/tagA", "tagA is cold") + self._multiplexer.Reload() + + response = self._plugin._tags_impl(context.RequestContext(), "eid") + + expected_composite_description = ( + "

Multiple descriptions

\n" + "

For runs: run3, run4

\n" + "

tagA is cold

\n" + "

For run: run2

\n" + "

tagA is hot

" + ) + self.assertEqual( + {"scalars/tagA": expected_composite_description}, + response["scalars"]["tagDescriptions"], + ) + self.assertEqual( + {"histograms/tagA": expected_composite_description}, + response["histograms"]["tagDescriptions"], + ) + + def test_tags_unsafe_description(self): + self._write_scalar("<&#run>", "scalars/<&#tag>", "<&#description>") + self._write_histogram( + "<&#run>", "histograms/<&#tag>", "<&#description>" + ) + self._multiplexer.Reload() + + response = self._plugin._tags_impl(context.RequestContext(), "eid") + + self.assertEqual( + {"scalars/<&#tag>": "

<&#description>

"}, + response["scalars"]["tagDescriptions"], + ) + self.assertEqual( + {"histograms/<&#tag>": "

<&#description>

"}, + response["histograms"]["tagDescriptions"], + ) + + def test_tags_unsafe_conflicting_description(self): + self._write_scalar("<&#run1>", "scalars/<&#tag>", None) + self._write_scalar("<&#run2>", "scalars/<&#tag>", "<&# is hot>") + self._write_scalar("<&#run3>", "scalars/<&#tag>", "<&# is cold>") + self._write_scalar("<&#run4>", "scalars/<&#tag>", "<&# is cold>") + self._write_histogram("<&#run1>", "histograms/<&#tag>", None) + self._write_histogram("<&#run2>", "histograms/<&#tag>", "<&# is hot>") + self._write_histogram("<&#run3>", "histograms/<&#tag>", "<&# is cold>") + self._write_histogram("<&#run4>", "histograms/<&#tag>", "<&# is cold>") + self._multiplexer.Reload() + + response = self._plugin._tags_impl(context.RequestContext(), "eid") + + expected_composite_description = ( + "

Multiple descriptions

\n" + "

For runs: <&#run3>, <&#run4>

\n" + "

<&# is cold>

\n" + "

For run: <&#run2>

\n" + "

<&# is hot>

" + ) + self.assertEqual( + {"scalars/<&#tag>": expected_composite_description}, + response["scalars"]["tagDescriptions"], + ) + self.assertEqual( + {"histograms/<&#tag>": expected_composite_description}, + response["histograms"]["tagDescriptions"], + ) + + def test_time_series_scalar(self): + self._write_scalar_data("run1", "scalars/tagA", [0, 100, -200]) + self._multiplexer.Reload() + + requests = [{"plugin": "scalars", "tag": "scalars/tagA"}] + response = self._plugin._time_series_impl( + context.RequestContext(), "", requests + ) + clean_response = self._clean_time_series_responses(response) + + self.assertEqual( + [ + { + "plugin": "scalars", + "tag": "scalars/tagA", + "runToSeries": { + "run1": [ + { + "wallTime": "", + "step": 0, + "value": 0.0, + }, + { + "wallTime": "", + "step": 1, + "value": 100.0, + }, + { + "wallTime": "", + "step": 2, + "value": -200.0, + }, + ] }, - { - "wallTime": "", - "step": 1, - "value": 100.0, + } + ], + clean_response, + ) + + def test_time_series_histogram(self): + self._write_histogram_data("run1", "histograms/tagA", [0, 10]) + self._multiplexer.Reload() + + requests = [ + {"plugin": "histograms", "tag": "histograms/tagA", "run": "run1"} + ] + response = self._plugin._time_series_impl( + context.RequestContext(), "", requests + ) + clean_response = self._clean_time_series_responses(response) + + self.assertEqual( + [ + { + "plugin": "histograms", + "tag": "histograms/tagA", + "run": "run1", + "runToSeries": { + "run1": [ + { + "wallTime": "", + "step": 0, + "bins": [ + {"min": -0.5, "max": 0.5, "count": 1.0} + ], + }, + { + "wallTime": "", + "step": 1, + "bins": [ + {"min": 9.5, "max": 10.5, "count": 1.0} + ], + }, + ] }, - { - "wallTime": "", - "step": 2, - "value": -200.0, + } + ], + clean_response, + ) + + def test_time_series_unmatching_request(self): + self._write_scalar_data("run1", "scalars/tagA", [0, 100, -200]) + + self._multiplexer.Reload() + + requests = [{"plugin": "scalars", "tag": "nothing-matches"}] + response 
= self._plugin._time_series_impl( + context.RequestContext(), "", requests + ) + clean_response = self._clean_time_series_responses(response) + + self.assertEqual( + [ + { + "plugin": "scalars", + "runToSeries": {}, + "tag": "nothing-matches", + } + ], + clean_response, + ) + + def test_time_series_multiple_runs(self): + self._write_scalar_data("run1", "scalars/tagA", [0]) + self._write_scalar_data("run2", "scalars/tagA", [1]) + self._write_scalar_data("run2", "scalars/tagB", [2]) + + self._multiplexer.Reload() + + requests = [{"plugin": "scalars", "tag": "scalars/tagA"}] + response = self._plugin._time_series_impl( + context.RequestContext(), "", requests + ) + clean_response = self._clean_time_series_responses(response) + + self.assertEqual( + [ + { + "plugin": "scalars", + "runToSeries": { + "run1": [ + { + "step": 0, + "value": 0.0, + "wallTime": "", + }, + ], + "run2": [ + { + "step": 0, + "value": 1.0, + "wallTime": "", + }, + ], }, - ] - }, - }], - clean_response, - ) - - def test_time_series_histogram(self): - self._write_histogram_data("run1", "histograms/tagA", [0, 10]) - self._multiplexer.Reload() - - requests = [ - { - "plugin": "histograms", - "tag": "histograms/tagA", - "run": "run1" - } - ] - response = self._plugin._time_series_impl( - context.RequestContext(), "", requests) - clean_response = self._clean_time_series_responses(response) - - self.assertEqual( - [{ - "plugin": "histograms", - "tag": "histograms/tagA", - "run": "run1", - "runToSeries": { - "run1": [ - { - "wallTime": "", - "step": 0, - "bins": [{"min": -0.5, "max": 0.5, "count": 1.0}], + "tag": "scalars/tagA", + } + ], + clean_response, + ) + + def test_time_series_multiple_requests(self): + self._write_scalar_data("run1", "scalars/tagA", [0]) + self._write_scalar_data("run2", "scalars/tagB", [1]) + + self._multiplexer.Reload() + + requests = [ + {"plugin": "scalars", "tag": "scalars/tagA"}, + {"plugin": "scalars", "tag": "scalars/tagB"}, + {"plugin": "scalars", "tag": "scalars/tagB"}, 
+ ] + response = self._plugin._time_series_impl( + context.RequestContext(), "", requests + ) + clean_response = self._clean_time_series_responses(response) + + self.assertEqual( + [ + { + "plugin": "scalars", + "runToSeries": { + "run1": [ + { + "step": 0, + "value": 0.0, + "wallTime": "", + }, + ], }, - { - "wallTime": "", - "step": 1, - "bins": [{"min": 9.5, "max": 10.5, "count": 1.0}], + "tag": "scalars/tagA", + }, + { + "plugin": "scalars", + "runToSeries": { + "run2": [ + { + "step": 0, + "value": 1.0, + "wallTime": "", + }, + ], }, - ] - }, - }], - clean_response, - ) - - def test_time_series_unmatching_request(self): - self._write_scalar_data("run1", "scalars/tagA", [0, 100, -200]) - - self._multiplexer.Reload() - - requests = [{"plugin": "scalars", "tag": "nothing-matches"}] - response = self._plugin._time_series_impl( - context.RequestContext(), "", requests) - clean_response = self._clean_time_series_responses(response) - - self.assertEqual([{ - "plugin": "scalars", - "runToSeries": {}, - "tag": "nothing-matches" - }], clean_response) - - def test_time_series_multiple_runs(self): - self._write_scalar_data("run1", "scalars/tagA", [0]) - self._write_scalar_data("run2", "scalars/tagA", [1]) - self._write_scalar_data("run2", "scalars/tagB", [2]) - - self._multiplexer.Reload() - - requests = [{"plugin": "scalars", "tag": "scalars/tagA"}] - response = self._plugin._time_series_impl( - context.RequestContext(), "", requests) - clean_response = self._clean_time_series_responses(response) - - self.assertEqual( - [{ - "plugin": "scalars", - "runToSeries": { - "run1": [{ - "step": 0, - "value": 0.0, - "wallTime": "", - },], - "run2": [{ - "step": 0, - "value": 1.0, - "wallTime": "", - },], - }, - "tag": "scalars/tagA", - }], - clean_response, - ) - - def test_time_series_multiple_requests(self): - self._write_scalar_data("run1", "scalars/tagA", [0]) - self._write_scalar_data("run2", "scalars/tagB", [1]) - - self._multiplexer.Reload() - - requests = [ - { - 
"plugin": "scalars", - "tag": "scalars/tagA" - }, - { - "plugin": "scalars", - "tag": "scalars/tagB" - }, - { - "plugin": "scalars", - "tag": "scalars/tagB" - }, - ] - response = self._plugin._time_series_impl( - context.RequestContext(), "", requests) - clean_response = self._clean_time_series_responses(response) - - self.assertEqual( - [{ - "plugin": "scalars", - "runToSeries": { - "run1": [{ - "step": 0, - "value": 0.0, - "wallTime": "", - },], - }, - "tag": "scalars/tagA", - }, { - "plugin": "scalars", - "runToSeries": { - "run2": [{ - "step": 0, - "value": 1.0, - "wallTime": "", - },], - }, - "tag": "scalars/tagB", - }, { - "plugin": "scalars", - "runToSeries": { - "run2": [{ - "step": 0, - "value": 1.0, - "wallTime": "", - },], - }, - "tag": "scalars/tagB", - }], - clean_response, - ) - - def test_time_series_single_request_specific_run(self): - self._write_scalar_data("run1", "scalars/tagA", [0]) - self._write_scalar_data("run2", "scalars/tagA", [1]) - - self._multiplexer.Reload() - - requests = [{"plugin": "scalars", "tag": "scalars/tagA", "run": "run2"}] - response = self._plugin._time_series_impl( - context.RequestContext(), "", requests) - clean_response = self._clean_time_series_responses(response) - - self.assertEqual( - [{ - "plugin": "scalars", - "runToSeries": { - "run2": [{ - "step": 0, - "value": 1.0, - "wallTime": "", - },], + "tag": "scalars/tagB", + }, + { + "plugin": "scalars", + "runToSeries": { + "run2": [ + { + "step": 0, + "value": 1.0, + "wallTime": "", + }, + ], + }, + "tag": "scalars/tagB", + }, + ], + clean_response, + ) + + def test_time_series_single_request_specific_run(self): + self._write_scalar_data("run1", "scalars/tagA", [0]) + self._write_scalar_data("run2", "scalars/tagA", [1]) + + self._multiplexer.Reload() + + requests = [{"plugin": "scalars", "tag": "scalars/tagA", "run": "run2"}] + response = self._plugin._time_series_impl( + context.RequestContext(), "", requests + ) + clean_response = 
self._clean_time_series_responses(response) + + self.assertEqual( + [ + { + "plugin": "scalars", + "runToSeries": { + "run2": [ + { + "step": 0, + "value": 1.0, + "wallTime": "", + }, + ], + }, + "tag": "scalars/tagA", + "run": "run2", + } + ], + clean_response, + ) + + def test_image_data(self): + self._write_image("run1", "images/tagA", 1, None) + self._multiplexer.Reload() + + # Get the blob_key manually. + image_id = self._get_image_blob_key( + "run1", "images/tagA", step=0, sample=0 + ) + (data, content_type) = self._plugin._image_data_impl( + context.RequestContext(), image_id + ) + + self.assertIsInstance(data, bytes) + self.assertEqual(content_type, "image/png") + self.assertGreater(len(data), 0) + + def test_time_series_bad_arguments(self): + requests = [ + {"plugin": "images"}, + {"plugin": "unknown_plugin", "tag": "tagA"}, + ] + response = self._plugin._time_series_impl( + context.RequestContext(), "expid", requests + ) + errors = [ + series_response.get("error", "") for series_response in response + ] + + self.assertEqual(errors, ["Missing tag", "Invalid plugin"]) + + def test_image_data_from_time_series_query(self): + self._write_image("run1", "images/tagA", samples=3) + self._multiplexer.Reload() + + requests = [ + { + "plugin": "images", + "tag": "images/tagA", + "run": "run1", + "sample": 2, + } + ] + original_response = self._plugin._time_series_impl( + context.RequestContext(), "expid", requests + ) + response = self._plugin._time_series_impl( + context.RequestContext(), "expid", requests + ) + clean_response = self._clean_time_series_responses(response) + + self.assertEqual( + [ + { + "plugin": "images", + "tag": "images/tagA", + "run": "run1", + "sample": 2, + "runToSeries": { + "run1": [ + { + "wallTime": "", + "step": 0, + "imageId": "", + } + ] + }, + } + ], + clean_response, + ) + + image_id = original_response[0]["runToSeries"]["run1"][0]["imageId"] + (data, content_type) = self._plugin._image_data_impl( + context.RequestContext(), image_id 
+ ) + + self.assertIsInstance(data, bytes) + self.assertGreater(len(data), 0) + + def test_image_bad_request(self): + self._write_image("run1", "images/tagA", 1, None) + self._multiplexer.Reload() + + invalid_sample = 999 + requests = [ + { + "plugin": "images", + "tag": "images/tagA", + "sample": invalid_sample, + "run": "run1", }, - "tag": "scalars/tagA", - "run": "run2", - }], - clean_response, - ) - - def test_image_data(self): - self._write_image("run1", "images/tagA", 1, None) - self._multiplexer.Reload() - - # Get the blob_key manually. - image_id = self._get_image_blob_key("run1", "images/tagA", step=0, sample=0) - (data, content_type) = self._plugin._image_data_impl( - context.RequestContext(), image_id) - - self.assertIsInstance(data, bytes) - self.assertEqual(content_type, 'image/png') - self.assertGreater(len(data), 0) - - def test_time_series_bad_arguments(self): - requests = [ - { - "plugin": "images" - }, - { - "plugin": "unknown_plugin", - "tag": "tagA" - }, - ] - response = self._plugin._time_series_impl( - context.RequestContext(), "expid", requests) - errors = [series_response.get("error", "") for series_response in response] - - self.assertEqual(errors, ["Missing tag", "Invalid plugin"]) - - def test_image_data_from_time_series_query(self): - self._write_image("run1", "images/tagA", samples=3) - self._multiplexer.Reload() - - requests = [ - { - "plugin": "images", - "tag": "images/tagA", - "run": "run1", - "sample": 2 - } - ] - original_response = self._plugin._time_series_impl( - context.RequestContext(), "expid", requests) - response = self._plugin._time_series_impl( - context.RequestContext(), "expid", requests) - clean_response = self._clean_time_series_responses(response) - - self.assertEqual([{ - "plugin": "images", - "tag": "images/tagA", - "run": "run1", - "sample": 2, - "runToSeries": { - "run1": [{ - "wallTime": "", - "step": 0, - "imageId": "" - }] - } - }], clean_response) - - image_id = 
original_response[0]["runToSeries"]["run1"][0]["imageId"] - (data, content_type) = self._plugin._image_data_impl( - context.RequestContext(), image_id) - - self.assertIsInstance(data, bytes) - self.assertGreater(len(data), 0) - - def test_image_bad_request(self): - self._write_image("run1", "images/tagA", 1, None) - self._multiplexer.Reload() - - invalid_sample = 999 - requests = [ - { - "plugin": "images", - "tag": "images/tagA", - "sample": invalid_sample, - "run": "run1" - }, - { - "plugin": "images", - "tag": "images/tagA", - "run": "run1" - }, - { - "plugin": "images", - "tag": "images/tagA", - }, - ] - response = self._plugin._time_series_impl( - context.RequestContext(), "expid", requests) - errors = [series_response.get("error", "") for series_response in response] - - self.assertEqual(errors, ["", "Missing sample", "Missing run"]) + {"plugin": "images", "tag": "images/tagA", "run": "run1"}, + {"plugin": "images", "tag": "images/tagA",}, + ] + response = self._plugin._time_series_impl( + context.RequestContext(), "expid", requests + ) + errors = [ + series_response.get("error", "") for series_response in response + ] + + self.assertEqual(errors, ["", "Missing sample", "Missing run"]) if __name__ == "__main__": - tf.test.main() + tf.test.main() From 938e294cbd745a1e37a52b83eaf3d9c84cb3c538 Mon Sep 17 00:00:00 2001 From: E Date: Thu, 17 Sep 2020 18:35:38 -0700 Subject: [PATCH 4/4] run docformatter --- tensorboard/plugins/metrics/metrics_plugin.py | 220 +++++++++--------- .../plugins/metrics/metrics_plugin_test.py | 23 +- 2 files changed, 122 insertions(+), 121 deletions(-) diff --git a/tensorboard/plugins/metrics/metrics_plugin.py b/tensorboard/plugins/metrics/metrics_plugin.py index 5cce912a5a..f6895dbfee 100644 --- a/tensorboard/plugins/metrics/metrics_plugin.py +++ b/tensorboard/plugins/metrics/metrics_plugin.py @@ -81,17 +81,17 @@ def _get_tag_description_info(mapping): def _build_combined_description(descriptions, description_to_runs): """Creates a 
single description from a set of descriptions. - Descriptions may be composites when a single tag has different descriptions - across multiple runs. + Descriptions may be composites when a single tag has different descriptions + across multiple runs. - Args: - descriptions: A list of description strings. - description_to_runs: A map from description strings to a set of run - strings. + Args: + descriptions: A list of description strings. + description_to_runs: A map from description strings to a set of run + strings. - Returns: - The combined description string. - """ + Returns: + The combined description string. + """ prefixed_descriptions = [] for description in descriptions: runs = sorted(description_to_runs[description]) @@ -154,15 +154,15 @@ def _get_run_tag_info(mapping): def _format_basic_mapping(mapping): """Prepares a scalar or histogram mapping for client consumption. - Args: - mapping: a nested map `d` such that `d[run][tag]` is a time series - produced by DataProvider's `list_*` methods. + Args: + mapping: a nested map `d` such that `d[run][tag]` is a time series + produced by DataProvider's `list_*` methods. - Returns: - A dict with the following fields: - runTagInfo: the return type of `_get_run_tag_info` - tagDescriptions: the return type of `_get_tag_to_description` - """ + Returns: + A dict with the following fields: + runTagInfo: the return type of `_get_run_tag_info` + tagDescriptions: the return type of `_get_tag_to_description` + """ return { "runTagInfo": _get_run_tag_info(mapping), "tagDescriptions": _get_tag_to_description(mapping), @@ -172,17 +172,17 @@ def _format_basic_mapping(mapping): def _format_image_blob_sequence_datum(sorted_datum_list, sample): """Formats image metadata from a list of BlobSequenceDatum's for clients. - This expects that frontend clients need to access images based on the - run+tag+sample. + This expects that frontend clients need to access images based on the + run+tag+sample. 
- Args: - sorted_datum_list: a list of DataProvider's `BlobSequenceDatum`, sorted by - step. This can be produced via DataProvider's `read_blob_sequences`. - sample: zero-indexed integer for the requested sample. + Args: + sorted_datum_list: a list of DataProvider's `BlobSequenceDatum`, sorted by + step. This can be produced via DataProvider's `read_blob_sequences`. + sample: zero-indexed integer for the requested sample. - Returns: - A list of `ImageStepDatum` (see http_api.md). - """ + Returns: + A list of `ImageStepDatum` (see http_api.md). + """ # For images, ignore the first 2 items of a BlobSequenceDatum's values, which # correspond to width, height. index = sample + 2 @@ -204,20 +204,20 @@ def _format_image_blob_sequence_datum(sorted_datum_list, sample): def _get_tag_run_image_info(mapping): """Returns a map of tag names to run information. - Args: - mapping: the result of DataProvider's `list_blob_sequences`. - - Returns: - A nested map from run strings to tag string to image info, where image - info is an object of form {"maxSamplesPerStep": num}. For example, - { - "reshaped": { - "test": {"maxSamplesPerStep": 1}, - "train": {"maxSamplesPerStep": 1} - }, - "convolved": {"test": {"maxSamplesPerStep": 50}}, - } - """ + Args: + mapping: the result of DataProvider's `list_blob_sequences`. + + Returns: + A nested map from run strings to tag string to image info, where image + info is an object of form {"maxSamplesPerStep": num}. For example, + { + "reshaped": { + "test": {"maxSamplesPerStep": 1}, + "train": {"maxSamplesPerStep": 1} + }, + "convolved": {"test": {"maxSamplesPerStep": 50}}, + } + """ tag_run_image_info = collections.defaultdict(dict) for (run, tag_to_content) in mapping.items(): for (tag, metadatum) in tag_to_content.items(): @@ -230,14 +230,14 @@ def _get_tag_run_image_info(mapping): def _format_image_mapping(mapping): """Prepares an image mapping for client consumption. - Args: - mapping: the result of DataProvider's `list_blob_sequences`. 
+ Args: + mapping: the result of DataProvider's `list_blob_sequences`. - Returns: - A dict with the following fields: - tagRunSampledInfo: the return type of `_get_tag_run_image_info` - tagDescriptions: the return type of `_get_tag_description_info` - """ + Returns: + A dict with the following fields: + tagRunSampledInfo: the return type of `_get_tag_run_image_info` + tagDescriptions: the return type of `_get_tag_description_info` + """ return { "tagDescriptions": _get_tag_to_description(mapping), "tagRunSampledInfo": _get_tag_run_image_info(mapping), @@ -252,10 +252,10 @@ class MetricsPlugin(base_plugin.TBPlugin): def __init__(self, context): """Instantiates MetricsPlugin. - Args: - context: A base_plugin.TBContext instance. MetricsLoader checks that - it contains a valid `data_provider`. - """ + Args: + context: A base_plugin.TBContext instance. MetricsLoader checks that + it contains a valid `data_provider`. + """ self._data_provider = context.data_provider # For histograms, use a round number + 1 since sampling includes both start @@ -298,14 +298,14 @@ def _serve_tags(self, request): def _tags_impl(self, ctx, experiment=None): """Returns tag metadata for a given experiment's logged metrics. - Args: - ctx: A `tensorboard.context.RequestContext` value. - experiment: optional string ID of the request's experiment. + Args: + ctx: A `tensorboard.context.RequestContext` value. + experiment: optional string ID of the request's experiment. - Returns: - A nested dict 'd' with keys in ("scalars", "histograms", "images") - and values being the return type of _format_*mapping. - """ + Returns: + A nested dict 'd' with keys in ("scalars", "histograms", "images") + and values being the return type of _format_*mapping. 
+ """ scalar_mapping = self._data_provider.list_scalars( ctx, experiment_id=experiment, @@ -348,14 +348,14 @@ def _serve_time_series(self, request): def _time_series_impl(self, ctx, experiment, series_requests): """Constructs a list of responses from a list of series requests. - Args: - ctx: A `tensorboard.context.RequestContext` value. - experiment: string ID of the request's experiment. - series_requests: a list of `TimeSeriesRequest` dicts (see http_api.md). + Args: + ctx: A `tensorboard.context.RequestContext` value. + experiment: string ID of the request's experiment. + series_requests: a list of `TimeSeriesRequest` dicts (see http_api.md). - Returns: - A list of `TimeSeriesResponse` dicts (see http_api.md). - """ + Returns: + A list of `TimeSeriesResponse` dicts (see http_api.md). + """ responses = [ self._get_time_series(ctx, experiment, request) for request in series_requests @@ -402,14 +402,14 @@ def _get_invalid_request_error(self, series_request): def _get_time_series(self, ctx, experiment, series_request): """Returns time series data for a given tag, plugin. - Args: - ctx: A `tensorboard.context.RequestContext` value. - experiment: string ID of the request's experiment. - series_request: a `TimeSeriesRequest` (see http_api.md). + Args: + ctx: A `tensorboard.context.RequestContext` value. + experiment: string ID of the request's experiment. + series_request: a `TimeSeriesRequest` (see http_api.md). - Returns: - A `TimeSeriesResponse` dict (see http_api.md). - """ + Returns: + A `TimeSeriesResponse` dict (see http_api.md). + """ tag = series_request.get("tag") run = series_request.get("run") plugin = series_request.get("plugin") @@ -443,15 +443,15 @@ def _get_time_series(self, ctx, experiment, series_request): def _get_run_to_scalar_series(self, ctx, experiment, tag, runs): """Builds a run-to-scalar-series dict for client consumption. - Args: - ctx: A `tensorboard.context.RequestContext` value. - experiment: a string experiment id. 
- tag: string of the requested tag. - runs: optional list of run names as strings. + Args: + ctx: A `tensorboard.context.RequestContext` value. + experiment: a string experiment id. + tag: string of the requested tag. + runs: optional list of run names as strings. - Returns: - A map from string run names to `ScalarStepDatum` (see http_api.md). - """ + Returns: + A map from string run names to `ScalarStepDatum` (see http_api.md). + """ mapping = self._data_provider.read_scalars( ctx, experiment_id=experiment, @@ -479,12 +479,12 @@ def _get_run_to_scalar_series(self, ctx, experiment, tag, runs): def _format_histogram_datum_bins(self, datum): """Formats a histogram datum's bins for client consumption. - Args: - datum: a DataProvider's TensorDatum. + Args: + datum: a DataProvider's TensorDatum. - Returns: - A list of `HistogramBin`s (see http_api.md). - """ + Returns: + A list of `HistogramBin`s (see http_api.md). + """ numpy_list = datum.numpy.tolist() bins = [{"min": x[0], "max": x[1], "count": x[2]} for x in numpy_list] return bins @@ -492,15 +492,15 @@ def _format_histogram_datum_bins(self, datum): def _get_run_to_histogram_series(self, ctx, experiment, tag, runs): """Builds a run-to-histogram-series dict for client consumption. - Args: - ctx: A `tensorboard.context.RequestContext` value. - experiment: a string experiment id. - tag: string of the requested tag. - runs: optional list of run names as strings. + Args: + ctx: A `tensorboard.context.RequestContext` value. + experiment: a string experiment id. + tag: string of the requested tag. + runs: optional list of run names as strings. - Returns: - A map from string run names to `HistogramStepDatum` (see http_api.md). - """ + Returns: + A map from string run names to `HistogramStepDatum` (see http_api.md). 
+ """ mapping = self._data_provider.read_tensors( ctx, experiment_id=experiment, @@ -528,16 +528,16 @@ def _get_run_to_histogram_series(self, ctx, experiment, tag, runs): def _get_run_to_image_series(self, ctx, experiment, tag, sample, runs): """Builds a run-to-image-series dict for client consumption. - Args: - ctx: A `tensorboard.context.RequestContext` value. - experiment: a string experiment id. - tag: string of the requested tag. - sample: zero-indexed integer for the requested sample. - runs: optional list of run names as strings. + Args: + ctx: A `tensorboard.context.RequestContext` value. + experiment: a string experiment id. + tag: string of the requested tag. + sample: zero-indexed integer for the requested sample. + runs: optional list of run names as strings. - Returns: - A `RunToSeries` dict (see http_api.md). - """ + Returns: + A `RunToSeries` dict (see http_api.md). + """ mapping = self._data_provider.read_blob_sequences( ctx, experiment_id=experiment, @@ -573,15 +573,15 @@ def _serve_image_data(self, request): def _image_data_impl(self, ctx, blob_key): """Gets the image data for a blob key. - Args: - ctx: A `tensorboard.context.RequestContext` value. - blob_key: a string identifier for a DataProvider blob. + Args: + ctx: A `tensorboard.context.RequestContext` value. + blob_key: a string identifier for a DataProvider blob. - Returns: - A tuple containing: - data: a raw bytestring of the requested image's contents. - content_type: a string HTTP content type. - """ + Returns: + A tuple containing: + data: a raw bytestring of the requested image's contents. + content_type: a string HTTP content type. 
+ """ data = self._data_provider.read_blob(ctx, blob_key=blob_key) image_type = imghdr.what(None, data) content_type = _IMGHDR_TO_MIMETYPE.get( diff --git a/tensorboard/plugins/metrics/metrics_plugin_test.py b/tensorboard/plugins/metrics/metrics_plugin_test.py index dde94c6ecd..ddaf923077 100644 --- a/tensorboard/plugins/metrics/metrics_plugin_test.py +++ b/tensorboard/plugins/metrics/metrics_plugin_test.py @@ -70,11 +70,11 @@ def _write_scalar(self, run, tag, description=None): def _write_scalar_data(self, run, tag, data=[]): """Writes scalar data, starting at step 0. - Args: - run: string run name. - tag: string tag name. - data: list of scalar values to write at each step. - """ + Args: + run: string run name. + tag: string tag name. + data: list of scalar values to write at each step. + """ subdir = os.path.join(self._logdir, run) writer = tf.summary.create_file_writer(subdir) @@ -99,11 +99,11 @@ def _write_histogram(self, run, tag, description=None): def _write_histogram_data(self, run, tag, data=[]): """Writes histogram data, starting at step 0. - Args: - run: string run name. - tag: string tag name. - data: list of histogram values to write at each step. - """ + Args: + run: string run name. + tag: string tag name. + data: list of histogram values to write at each step. + """ subdir = os.path.join(self._logdir, run) writer = tf.summary.create_file_writer(subdir) @@ -130,7 +130,8 @@ def _write_image(self, run, tag, samples=2, description=None): ### Misc utilities. def _clean_time_series_responses(self, responses): - """Cleans non-deterministic data from a TimeSeriesResponse, in place.""" + """Cleans non-deterministic data from a TimeSeriesResponse, in + place.""" for response in responses: run_to_series = response.get("runToSeries", {}) for (run, series) in run_to_series.items():