Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@
import json
import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf

from google.protobuf import json_format
Expand Down Expand Up @@ -223,6 +224,25 @@ def _delete_example(self, request):
self.generate_sprite([ex.SerializeToString() for ex in self.examples])
return http_util.Respond(request, {}, 'application/json')

def _parse_request_arguments(self, request):
"""Parses comma separated request arguments

Args:
request: A request that should contain 'inference_address', 'model_name',
'model_version', 'model_signature'.

Returns:
A tuple of lists for model parameters
"""
inference_addresses = request.args.get('inference_address').split(',')
model_names = request.args.get('model_name').split(',')
model_versions = request.args.get('model_version').split(',')
model_signatures = request.args.get('model_signature').split(',')
if len(model_names) != len(inference_addresses):
raise common_utils.InvalidUserInputError('Every model should have a ' +
'name and address.')
return inference_addresses, model_names, model_versions, model_signatures

@wrappers.Request.application
def _infer(self, request):
"""Returns JSON for the `vz-line-chart`s for a feature.
Expand All @@ -249,29 +269,36 @@ def _infer(self, request):
if request.method != 'GET':
tf.logging.error('%s requests are forbidden.', request.method)
return http_util.Respond(request, {'error': 'invalid non-GET request'},
'application/json', code=405)
'application/json', code=405)

(inference_addresses, model_names, model_versions,
model_signatures) = self._parse_request_arguments(request)

serving_bundle = inference_utils.ServingBundle(
request.args.get('inference_address'),
request.args.get('model_name'), request.args.get('model_type'),
request.args.get('model_version'),
request.args.get('model_signature'),
request.args.get('use_predict') == 'true',
request.args.get('predict_input_tensor'),
request.args.get('predict_output_tensor'))
indices_to_infer = sorted(self.updated_example_indices)
examples_to_infer = [self.examples[index] for index in indices_to_infer]

# Get inference results proto and combine with indices of inferred
# examples and respond with this data as json.
inference_result_proto = platform_utils.call_servo(
examples_to_infer, serving_bundle)
new_inferences = inference_utils.wrap_inference_results(
inference_result_proto)
infer_json = json_format.MessageToJson(
new_inferences, including_default_value_fields=True)
infer_obj = json.loads(infer_json)
resp = {'indices': indices_to_infer, 'results': infer_obj}
infer_objs = []
for model_num in xrange(len(inference_addresses)):
serving_bundle = inference_utils.ServingBundle(
inference_addresses[model_num],
model_names[model_num],
request.args.get('model_type'),
model_versions[model_num],
model_signatures[model_num],
request.args.get('use_predict') == 'true',
request.args.get('predict_input_tensor'),
request.args.get('predict_output_tensor'))

# Get inference results proto and combine with indices of inferred
# examples and respond with this data as json.
inference_result_proto = platform_utils.call_servo(
examples_to_infer, serving_bundle)
new_inferences = inference_utils.wrap_inference_results(
inference_result_proto)
infer_json = json_format.MessageToJson(
new_inferences, including_default_value_fields=True)
infer_objs.append(json.loads(infer_json))

resp = {'indices': indices_to_infer, 'results': infer_objs}
self.updated_example_indices = set()
return http_util.Respond(request, {'inferences': json.dumps(resp),
'vocab': json.dumps(label_vocab)},
Expand Down Expand Up @@ -401,14 +428,22 @@ def _infer_mutants_handler(self, request):
example_index = int(request.args.get('example_index', '0'))
feature_name = request.args.get('feature_name')
example = self.examples[example_index]

(inference_addresses, model_names, model_versions,
model_signatures) = self._parse_request_arguments(request)

# TODO(tolgab) Generalize this to multiple models
model_num = 0
serving_bundle = inference_utils.ServingBundle(
request.args.get('inference_address'), request.args.get('model_name'),
inference_addresses[model_num],
model_names[model_num],
request.args.get('model_type'),
request.args.get('model_version'),
request.args.get('model_signature'),
model_versions[model_num],
model_signatures[model_num],
request.args.get('use_predict') == 'true',
request.args.get('predict_input_tensor'),
request.args.get('predict_output_tensor'))

viz_params = inference_utils.VizParams(
request.args.get('x_min'), request.args.get('x_max'),
self.examples[0:NUM_EXAMPLES_TO_SCAN], NUM_MUTANTS,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -140,7 +140,9 @@ def test_infer(self, mock_call_servo):
'/data/plugin/whatif/infer?' + urllib_parse.urlencode({
'inference_address': 'addr',
'model_name': 'name',
'model_type': 'regression'
'model_type': 'regression',
'model_version': ',',
'model_signature': ',',
}))

self.assertEqual(200, response.status_code)
Expand Down Expand Up @@ -199,7 +201,7 @@ def pass_through(example, feature_name, serving_bundle, viz_params):
'serving_bundle': {
'inference_address': serving_bundle.inference_address,
'model_name': serving_bundle.model_name,
'model_type': serving_bundle.model_type
'model_type': serving_bundle.model_type,
},
'viz_params': {
'x_min': viz_params.x_min,
Expand All @@ -218,6 +220,8 @@ def pass_through(example, feature_name, serving_bundle, viz_params):
'model_name': '/ml/cassandrax/iris_classification',
'inference_address': 'ml-serving-temp.prediction',
'model_type': 'classification',
'model_version': ',',
'model_signature': ',',
'x_min': '-10',
'x_max': '10',
}))
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,31 @@ load("//tensorboard/defs:vulcanize.bzl", "tensorboard_html_binary")

licenses(["notice"]) # Apache 2.0

# Web library bundling the multi-model demo page: the demo HTML entry points,
# the TF.js runtime, and everything under data/, served at the dashboard path.
tf_web_library(
    name = "multidemo",
    testonly = True,
    srcs = [
        "multi_index.html",
        "tf-interactive-inference-multi-demo.html",
        "@org_tensorflow_tfjs//:tf.min.js",
    ] + glob(["data/**"]),
    path = "/tf-interactive-inference-dashboard",
    deps = [
        "//tensorboard/plugins/interactive_inference/tf_interactive_inference_dashboard",
        "//tensorboard/components/tf_imports:polymer",
        "//tensorboard/components/tf_imports:webcomponentsjs",
    ],
)

# Vulcanized, Closure-compiled single-file build of the multi-model demo
# (multi_index.html -> multi_demo.html).
tensorboard_html_binary(
    name = "multidemoserver",
    testonly = True,  # Keeps JavaScript somewhat readable
    compile = True,  # Run Closure Compiler
    input_path = "/tf-interactive-inference-dashboard/multi_index.html",
    output_path = "/tf-interactive-inference-dashboard/multi_demo.html",
    deps = [":multidemo"],
)

tf_web_library(
name = "demo",
testonly = True,
Expand Down
Original file line number Diff line number Diff line change
@@ -1 +1,193 @@
{"modelTopology": {"training_config": {"metrics": [], "loss": "binary_crossentropy", "optimizer_config": {"class_name": "RMSprop", "config": {"epsilon": 1e-07, "lr": 0.0010000000474974513, "rho": 0.8999999761581421, "decay": 0.0}}, "sample_weight_mode": null, "loss_weights": null}, "keras_version": "2.1.4", "model_config": {"class_name": "Sequential", "config": [{"class_name": "Dense", "config": {"kernel_initializer": {"class_name": "VarianceScaling", "config": {"distribution": "uniform", "scale": 1.0, "seed": null, "mode": "fan_avg"}}, "name": "dense_1", "kernel_constraint": null, "bias_regularizer": null, "bias_constraint": null, "dtype": "float32", "activation": "sigmoid", "trainable": true, "kernel_regularizer": null, "bias_initializer": {"class_name": "Zeros", "config": {}}, "units": 36, "batch_input_shape": [null, 104], "use_bias": true, "activity_regularizer": null}}, {"class_name": "Dense", "config": {"kernel_initializer": {"class_name": "VarianceScaling", "config": {"distribution": "uniform", "scale": 1.0, "seed": null, "mode": "fan_avg"}}, "name": "dense_2", "kernel_constraint": null, "bias_regularizer": null, "bias_constraint": null, "dtype": "float32", "activation": "sigmoid", "trainable": true, "kernel_regularizer": null, "bias_initializer": {"class_name": "Zeros", "config": {}}, "units": 12, "batch_input_shape": [null, 36], "use_bias": true, "activity_regularizer": null}}, {"class_name": "Dense", "config": {"kernel_initializer": {"class_name": "VarianceScaling", "config": {"distribution": "uniform", "scale": 1.0, "seed": null, "mode": "fan_avg"}}, "name": "dense_3", "kernel_constraint": null, "bias_regularizer": null, "bias_constraint": null, "dtype": "float32", "activation": "sigmoid", "trainable": true, "kernel_regularizer": null, "bias_initializer": {"class_name": "Zeros", "config": {}}, "units": 2, "batch_input_shape": [null, 12], "use_bias": true, "activity_regularizer": null}}]}, "backend": "tensorflow"}, "weightsManifest": [{"paths": 
["group1-shard1of1"], "weights": [{"dtype": "float32", "shape": [104, 36], "name": "dense_1/kernel"}, {"dtype": "float32", "shape": [36], "name": "dense_1/bias"}]}, {"paths": ["group2-shard1of1"], "weights": [{"dtype": "float32", "shape": [36, 12], "name": "dense_2/kernel"}, {"dtype": "float32", "shape": [12], "name": "dense_2/bias"}]}, {"paths": ["group3-shard1of1"], "weights": [{"dtype": "float32", "shape": [12, 2], "name": "dense_3/kernel"}, {"dtype": "float32", "shape": [2], "name": "dense_3/bias"}]}]}
{
"modelTopology": {
"training_config": {
"metrics": [],
"loss": "binary_crossentropy",
"optimizer_config": {
"class_name": "RMSprop",
"config": {
"epsilon": 1E-7,
"lr": 0.0010000000474974513,
"rho": 0.8999999761581421,
"decay": 0.0
}
},
"sample_weight_mode": null,
"loss_weights": null
},
"keras_version": "2.1.4",
"model_config": {
"class_name": "Sequential",
"config": [
{
"class_name": "Dense",
"config": {
"kernel_initializer": {
"class_name": "VarianceScaling",
"config": {
"distribution": "uniform",
"scale": 1.0,
"seed": null,
"mode": "fan_avg"
}
},
"name": "dense_1",
"kernel_constraint": null,
"bias_regularizer": null,
"bias_constraint": null,
"dtype": "float32",
"activation": "sigmoid",
"trainable": true,
"kernel_regularizer": null,
"bias_initializer": {
"class_name": "Zeros",
"config": {}
},
"units": 36,
"batch_input_shape": [
null,
104
],
"use_bias": true,
"activity_regularizer": null
}
},
{
"class_name": "Dense",
"config": {
"kernel_initializer": {
"class_name": "VarianceScaling",
"config": {
"distribution": "uniform",
"scale": 1.0,
"seed": null,
"mode": "fan_avg"
}
},
"name": "dense_2",
"kernel_constraint": null,
"bias_regularizer": null,
"bias_constraint": null,
"dtype": "float32",
"activation": "sigmoid",
"trainable": true,
"kernel_regularizer": null,
"bias_initializer": {
"class_name": "Zeros",
"config": {}
},
"units": 12,
"batch_input_shape": [
null,
36
],
"use_bias": true,
"activity_regularizer": null
}
},
{
"class_name": "Dense",
"config": {
"kernel_initializer": {
"class_name": "VarianceScaling",
"config": {
"distribution": "uniform",
"scale": 1.0,
"seed": null,
"mode": "fan_avg"
}
},
"name": "dense_3",
"kernel_constraint": null,
"bias_regularizer": null,
"bias_constraint": null,
"dtype": "float32",
"activation": "sigmoid",
"trainable": true,
"kernel_regularizer": null,
"bias_initializer": {
"class_name": "Zeros",
"config": {}
},
"units": 2,
"batch_input_shape": [
null,
12
],
"use_bias": true,
"activity_regularizer": null
}
}
]
},
"backend": "tensorflow"
},
"weightsManifest": [
{
"paths": [
"group1-shard1of1"
],
"weights": [
{
"dtype": "float32",
"shape": [
104,
36
],
"name": "dense_1/kernel"
},
{
"dtype": "float32",
"shape": [
36
],
"name": "dense_1/bias"
}
]
},
{
"paths": [
"group2-shard1of1"
],
"weights": [
{
"dtype": "float32",
"shape": [
36,
12
],
"name": "dense_2/kernel"
},
{
"dtype": "float32",
"shape": [
12
],
"name": "dense_2/bias"
}
]
},
{
"paths": [
"group3-shard1of1"
],
"weights": [
{
"dtype": "float32",
"shape": [
12,
2
],
"name": "dense_3/kernel"
},
{
"dtype": "float32",
"shape": [
2
],
"name": "dense_3/bias"
}
]
}
]
}
Binary file not shown.
Loading