diff --git a/tensorboard/plugins/interactive_inference/interactive_inference_plugin.py b/tensorboard/plugins/interactive_inference/interactive_inference_plugin.py
index 04d3248d11..c11a31978f 100644
--- a/tensorboard/plugins/interactive_inference/interactive_inference_plugin.py
+++ b/tensorboard/plugins/interactive_inference/interactive_inference_plugin.py
@@ -97,6 +97,7 @@ def get_plugin_apps(self):
'/delete_example': self._delete_example,
'/infer_mutants': self._infer_mutants_handler,
'/eligible_features': self._eligible_features_from_example_handler,
+ '/sort_eligible_features': self._sort_eligible_features_handler,
}
def is_active(self):
@@ -322,9 +323,51 @@ def _eligible_features_from_example_handler(self, request):
self.examples[0: NUM_EXAMPLES_TO_SCAN], NUM_MUTANTS)
return http_util.Respond(request, features_list, 'application/json')
+ @wrappers.Request.application
+ def _sort_eligible_features_handler(self, request):
+ """Returns a sorted list of JSON objects for each feature in the example.
+
+ The list is sorted by interestingness in terms of the resulting change in
+ inference values across feature values, for partial dependence plots.
+
+ Args:
+ request: A request for sorted features.
+
+ Returns:
+ A sorted list with a JSON object for each feature.
+ Numeric features are represented as
+ {name: observedMin: observedMax: interestingness:}.
+      Categorical features are repesented as
+ {name: samples:[] interestingness:}.
+ """
+ try:
+ features_list = inference_utils.get_eligible_features(
+ self.examples[0: NUM_EXAMPLES_TO_SCAN], NUM_MUTANTS)
+ example_index = int(request.args.get('example_index', '0'))
+ (inference_addresses, model_names, model_versions,
+ model_signatures) = self._parse_request_arguments(request)
+ chart_data = {}
+ for feat in features_list:
+ chart_data[feat['name']] = self._infer_mutants_impl(
+ feat['name'], example_index,
+ inference_addresses, model_names, request.args.get('model_type'),
+ model_versions, model_signatures,
+ request.args.get('use_predict') == 'true',
+ request.args.get('predict_input_tensor'),
+ request.args.get('predict_output_tensor'),
+ feat['observedMin'] if 'observedMin' in feat else 0,
+          feat['observedMax'] if 'observedMax' in feat else 0,
+ None)
+ features_list = inference_utils.sort_eligible_features(
+ features_list, chart_data)
+ return http_util.Respond(request, features_list, 'application/json')
+ except common_utils.InvalidUserInputError as e:
+ return http_util.Respond(request, {'error': e.message},
+ 'application/json', code=400)
+
@wrappers.Request.application
def _infer_mutants_handler(self, request):
- """Returns JSON for the `vz-line-chart`s for a feature.
+ """Returns JSON for the partial dependence plots for a feature.
Args:
request: A request that should contain 'feature_name', 'example_index',
@@ -342,31 +385,43 @@ def _infer_mutants_handler(self, request):
example_index = int(request.args.get('example_index', '0'))
feature_name = request.args.get('feature_name')
- examples = (self.examples if example_index == -1
- else [self.examples[example_index]])
-
(inference_addresses, model_names, model_versions,
model_signatures) = self._parse_request_arguments(request)
-
- serving_bundles = []
- for model_num in xrange(len(inference_addresses)):
- serving_bundles.append(inference_utils.ServingBundle(
- inference_addresses[model_num],
- model_names[model_num],
- request.args.get('model_type'),
- model_versions[model_num],
- model_signatures[model_num],
- request.args.get('use_predict') == 'true',
- request.args.get('predict_input_tensor'),
- request.args.get('predict_output_tensor')))
-
- viz_params = inference_utils.VizParams(
+ json_mapping = self._infer_mutants_impl(feature_name, example_index,
+ inference_addresses, model_names, request.args.get('model_type'),
+ model_versions, model_signatures,
+ request.args.get('use_predict') == 'true',
+ request.args.get('predict_input_tensor'),
+ request.args.get('predict_output_tensor'),
request.args.get('x_min'), request.args.get('x_max'),
- self.examples[0:NUM_EXAMPLES_TO_SCAN], NUM_MUTANTS,
request.args.get('feature_index_pattern'))
- json_mapping = inference_utils.mutant_charts_for_feature(
- examples, feature_name, serving_bundles, viz_params)
return http_util.Respond(request, json_mapping, 'application/json')
except common_utils.InvalidUserInputError as e:
return http_util.Respond(request, {'error': e.message},
'application/json', code=400)
+
+ def _infer_mutants_impl(self, feature_name, example_index, inference_addresses,
+ model_names, model_type, model_versions, model_signatures, use_predict,
+ predict_input_tensor, predict_output_tensor, x_min, x_max,
+ feature_index_pattern):
+ """Helper for generating PD plots for a feature."""
+ examples = (self.examples if example_index == -1
+ else [self.examples[example_index]])
+ serving_bundles = []
+ for model_num in xrange(len(inference_addresses)):
+ serving_bundles.append(inference_utils.ServingBundle(
+ inference_addresses[model_num],
+ model_names[model_num],
+ model_type,
+ model_versions[model_num],
+ model_signatures[model_num],
+ use_predict,
+ predict_input_tensor,
+ predict_output_tensor))
+
+ viz_params = inference_utils.VizParams(
+ x_min, x_max,
+ self.examples[0:NUM_EXAMPLES_TO_SCAN], NUM_MUTANTS,
+ feature_index_pattern)
+ return inference_utils.mutant_charts_for_feature(
+ examples, feature_name, serving_bundles, viz_params)
diff --git a/tensorboard/plugins/interactive_inference/interactive_inference_plugin_test.py b/tensorboard/plugins/interactive_inference/interactive_inference_plugin_test.py
index cce07689c8..269adb7cb7 100644
--- a/tensorboard/plugins/interactive_inference/interactive_inference_plugin_test.py
+++ b/tensorboard/plugins/interactive_inference/interactive_inference_plugin_test.py
@@ -232,6 +232,39 @@ def pass_through(example, feature_name, serving_bundles, viz_params):
self.assertAlmostEqual(-10, result['viz_params']['x_min'])
self.assertAlmostEqual(10, result['viz_params']['x_max'])
+ @mock.patch.object(inference_utils, 'sort_eligible_features')
+ @mock.patch.object(inference_utils, 'mutant_charts_for_feature')
+ def test_infer(
+ self, mock_mutant_charts_for_feature, mock_sort_eligible_features):
+ self.plugin.examples = [
+ self.get_fake_example(0),
+ self.get_fake_example(1),
+ self.get_fake_example(2)
+ ]
+
+ mock_mutant_charts_for_feature.return_value = []
+ sorted_features_list = [
+ {'name': 'feat1', 'interestingness': .2},
+ {'name': 'feat2', 'interestingness': .1}
+ ]
+ mock_sort_eligible_features.return_value = sorted_features_list
+
+ url_options = urllib_parse.urlencode({
+ 'inference_address': 'addr',
+ 'model_name': 'name',
+ 'model_type': 'regression',
+ 'model_version': '',
+ 'model_signature': '',
+ })
+ response = self.server.get(
+ '/data/plugin/whatif/sort_eligible_features?' + url_options)
+
+ self.assertEqual(200, response.status_code)
+ self.assertEqual(0, len(self.plugin.updated_example_indices))
+ output_list = json.loads(response.get_data().decode('utf-8'))
+    self.assertEqual('feat1', output_list[0]['name'])
+    self.assertEqual('feat2', output_list[1]['name'])
+
if __name__ == '__main__':
tf.test.main()
diff --git a/tensorboard/plugins/interactive_inference/tf_interactive_inference_dashboard/demo/tf-interactive-inference-age-demo.html b/tensorboard/plugins/interactive_inference/tf_interactive_inference_dashboard/demo/tf-interactive-inference-age-demo.html
index 99afac1b5d..7f6ba93f38 100644
--- a/tensorboard/plugins/interactive_inference/tf_interactive_inference_dashboard/demo/tf-interactive-inference-age-demo.html
+++ b/tensorboard/plugins/interactive_inference/tf_interactive_inference_dashboard/demo/tf-interactive-inference-age-demo.html
@@ -332,125 +332,169 @@
});
this.$.dash.addEventListener('infer-mutants', async (e) => {
const method = async () => {
- const examples = [];
- const featureMapping = {
- 'capital-gain': 0,
- 'capital-loss': 1,
- education: 2,
- 'education-num': 3,
- 'hours-per-week': 4,
- 'marital-status': 5,
- 'native-country': 6,
- occupation: 7,
- race: 8,
- relationship: 9,
- sex: 10,
- workclass: 11,
- };
- const xMin = +e.detail.x_min;
- const xMax = +e.detail.x_max;
- const isNum = !this.$.dash.partialDepPlotEligibleFeatures[
- featureMapping[e.detail.feature_name]
- ].samples;
- const numTrials = isNum
- ? 10
- : this.$.dash.partialDepPlotEligibleFeatures[
- featureMapping[e.detail.feature_name]
- ].samples.length;
- const exampleIndices =
- e.detail.example_index == -1
- ? [...Array(this.data.length).keys()]
- : [e.detail.example_index];
- for (let i = 0; i < numTrials; i++) {
- for (let idx = 0; idx < exampleIndices.length; idx++) {
- const ex = JSON.parse(
- JSON.stringify(this.data[exampleIndices[idx]])
- );
- if (isNum) {
- ex.features.feature[
- e.detail.feature_name
- ].floatList.value[0] =
- xMin + i * ((1 / (numTrials - 1)) * (xMax - xMin));
- } else {
- ex.features.feature[
- e.detail.feature_name
- ].bytesList.value[0] = btoa(
- this.$.dash.partialDepPlotEligibleFeatures[
- featureMapping[e.detail.feature_name]
- ].samples[i]
- );
- }
- examples.push(ex);
- }
- }
- const results = [];
- const predValuesList = [];
- const BATCH_SIZE = 128;
- for (let i = 0; i < examples.length; i += BATCH_SIZE) {
- let tlist = [];
- for (
- let idx = i;
- idx < Math.min(i + BATCH_SIZE, examples.length);
- idx++
- ) {
- tlist.push(this.convertExToTensor(examples[idx]));
- }
- const tconcat = tf.concat(tlist);
- tlist.forEach((tensor) => tensor.dispose());
- const input = tconcat.reshape([tlist.length, 105]);
- const res = this.model.predict(input, {batchSize: BATCH_SIZE});
- const vals = await res.data();
- predValuesList.push(vals);
- input.dispose();
- res.dispose();
- tconcat.dispose();
- }
- const predSize = predValuesList.reduce((a, b) => a + b.length, 0);
- const predValues = new Float32Array(predSize);
- let curIdx = 0;
- for (let i = 0; i < predValuesList.length; i++) {
- predValues.set(predValuesList[i], curIdx);
- curIdx += predValuesList[i].length;
- }
- for (let trialIdx = 0; trialIdx < numTrials; trialIdx++) {
- const startingPredIdx = trialIdx * exampleIndices.length;
- const scores = [];
- for (
- let exampleIdx = 0;
- exampleIdx < exampleIndices.length;
- exampleIdx++
- ) {
- scores.push(predValues[startingPredIdx + exampleIdx]);
+ const data = await this.createPdPlotData(
+ e.detail.feature_name,
+ e.detail.example_index,
+ +e.detail.x_min,
+ +e.detail.x_max
+ );
+ let isNum = false;
+ for (let entry of this.$.dash.partialDepPlotEligibleFeatures) {
+ if (entry.name == e.detail.feature_name && !entry.samples) {
+ isNum = true;
+ break;
}
- const score =
- scores.reduce((prev, cur) => prev + cur, 0) / scores.length;
- const adjustedScore = 2 * this.means['age'] * score;
- const ex = examples[trialIdx * exampleIndices.length];
- const step = isNum
- ? ex.features.feature[e.detail.feature_name].floatList
- .value[0]
- : [
- atob(
- ex.features.feature[e.detail.feature_name].bytesList
- .value[0]
- ),
- ];
- results.push({step: step, scalar: adjustedScore});
}
this.$.dash.makeChartForFeature(
isNum ? 'numeric' : 'categorical',
e.detail.feature_name,
- [[{value: results}]]
+ data
);
};
setTimeout(method, 50);
});
+ this.$.dash.addEventListener('sort-eligible-features', async (e) => {
+ const method = async () => {
+ const chartData = {};
+ const list = this.$.dash.partialDepPlotEligibleFeatures;
+ for (let feat of list) {
+ chartData[feat.name] = await this.createPdPlotData(
+ feat.name,
+ e.detail.example_index,
+ +feat.observedMin,
+ +feat.observedMax
+ );
+ }
+ for (let feat of list) {
+ const charts = chartData[feat.name];
+ let maxMeasure = 0;
+ const isNum = feat.samples == null;
+ for (let models of charts) {
+ for (let chart of models) {
+ for (let key in chart) {
+ const series = chart[key];
+ let measure = 0;
+ if (isNum) {
+ for (let i = 0; i < series.length - 1; i++) {
+ measure += Math.abs(
+ series[i].scalar - series[i + 1].scalar
+ );
+ }
+ } else {
+ let minY = Infinity;
+ let maxY = -Infinity;
+ for (let i = 0; i < series.length; i++) {
+ const val = series[i].scalar;
+ if (val < minY) {
+ minY = val;
+ }
+ if (val > maxY) {
+ maxY = val;
+ }
+ }
+ measure = maxY - minY;
+ }
+ if (measure > maxMeasure) {
+ maxMeasure = measure;
+ }
+ }
+ }
+ }
+ feat.interestingness = maxMeasure;
+ }
+ this.$.dash.partialDepPlotEligibleFeatures = [];
+ list.sort((a, b) => b.interestingness - a.interestingness);
+ this.$.dash.partialDepPlotEligibleFeatures = list;
+ };
+ setTimeout(method, 50);
+ });
requestAnimationFrame(() => {
this.$.dash.inferClicked_();
this.$.dash.selectedLabelFeature = 'age';
});
});
},
+ createPdPlotData: async function(featureName, exampleIndex, xMin, xMax) {
+ const examples = [];
+ let feat = null;
+ for (let entry of this.$.dash.partialDepPlotEligibleFeatures) {
+ if (entry.name == featureName) {
+ feat = entry;
+ break;
+ }
+ }
+ const isNum = !feat.samples;
+ const numTrials = isNum ? 10 : feat.samples.length;
+ const exampleIndices =
+ exampleIndex == -1
+ ? [...Array(this.data.length).keys()]
+ : [exampleIndex];
+ for (let i = 0; i < numTrials; i++) {
+ for (let idx = 0; idx < exampleIndices.length; idx++) {
+ const ex = JSON.parse(
+ JSON.stringify(this.data[exampleIndices[idx]])
+ );
+ if (isNum) {
+ ex.features.feature[featureName].floatList.value[0] =
+ xMin + i * ((1 / (numTrials - 1)) * (xMax - xMin));
+ } else {
+ ex.features.feature[featureName].bytesList.value[0] = btoa(
+ feat.samples[i]
+ );
+ }
+ examples.push(ex);
+ }
+ }
+ const results = [];
+ const predValuesList = [];
+ const BATCH_SIZE = 128;
+ for (let i = 0; i < examples.length; i += BATCH_SIZE) {
+ let tlist = [];
+ for (
+ let idx = i;
+ idx < Math.min(i + BATCH_SIZE, examples.length);
+ idx++
+ ) {
+ tlist.push(this.convertExToTensor(examples[idx]));
+ }
+ const tconcat = tf.concat(tlist);
+ tlist.forEach((tensor) => tensor.dispose());
+ const input = tconcat.reshape([tlist.length, 105]);
+ const res = this.model.predict(input, {batchSize: BATCH_SIZE});
+ const vals = await res.data();
+ predValuesList.push(vals);
+ input.dispose();
+ res.dispose();
+ tconcat.dispose();
+ }
+ const predSize = predValuesList.reduce((a, b) => a + b.length, 0);
+ const predValues = new Float32Array(predSize);
+ let curIdx = 0;
+ for (let i = 0; i < predValuesList.length; i++) {
+ predValues.set(predValuesList[i], curIdx);
+ curIdx += predValuesList[i].length;
+ }
+ for (let trialIdx = 0; trialIdx < numTrials; trialIdx++) {
+ const startingPredIdx = trialIdx * exampleIndices.length;
+ const scores = [];
+ for (
+ let exampleIdx = 0;
+ exampleIdx < exampleIndices.length;
+ exampleIdx++
+ ) {
+ scores.push(predValues[startingPredIdx + exampleIdx]);
+ }
+ const score =
+ scores.reduce((prev, cur) => prev + cur, 0) / scores.length;
+ const adjustedScore = 2 * this.means['age'] * score;
+ const ex = examples[trialIdx * exampleIndices.length];
+ const step = isNum
+ ? ex.features.feature[featureName].floatList.value[0]
+ : [atob(ex.features.feature[featureName].bytesList.value[0])];
+ results.push({step: step, scalar: adjustedScore});
+ }
+ return [[{'1': results}]];
+ },
convertExToTensor: function(ex) {
const vals = [];
diff --git a/tensorboard/plugins/interactive_inference/tf_interactive_inference_dashboard/demo/tf-interactive-inference-demo.html b/tensorboard/plugins/interactive_inference/tf_interactive_inference_dashboard/demo/tf-interactive-inference-demo.html
index f0f8c1700e..edd7912806 100644
--- a/tensorboard/plugins/interactive_inference/tf_interactive_inference_dashboard/demo/tf-interactive-inference-demo.html
+++ b/tensorboard/plugins/interactive_inference/tf_interactive_inference_dashboard/demo/tf-interactive-inference-demo.html
@@ -340,131 +340,169 @@
});
this.$.dash.addEventListener('infer-mutants', async (e) => {
const method = async () => {
- const examples = [];
- const featureMapping = {
- age: 0,
- 'capital-gain': 1,
- 'capital-loss': 2,
- education: 3,
- 'education-num': 4,
- 'hours-per-week': 5,
- 'marital-status': 6,
- 'native-country': 7,
- occupation: 8,
- over_50k: 9,
- race: 10,
- relationship: 11,
- sex: 12,
- workclass: 13,
- };
- const xMin = +e.detail.x_min;
- const xMax = +e.detail.x_max;
- const isNum = !this.$.dash.partialDepPlotEligibleFeatures[
- featureMapping[e.detail.feature_name]
- ].samples;
- const numTrials = isNum
- ? 10
- : this.$.dash.partialDepPlotEligibleFeatures[
- featureMapping[e.detail.feature_name]
- ].samples.length;
- const exampleIndices =
- e.detail.example_index == -1
- ? [...Array(this.data.length).keys()]
- : [e.detail.example_index];
- for (let i = 0; i < numTrials; i++) {
- for (let idx = 0; idx < exampleIndices.length; idx++) {
- const ex = JSON.parse(
- JSON.stringify(this.data[exampleIndices[idx]])
- );
- if (isNum) {
- ex.features.feature[
- e.detail.feature_name
- ].floatList.value[0] =
- xMin + i * ((1 / (numTrials - 1)) * (xMax - xMin));
- } else {
- ex.features.feature[
- e.detail.feature_name
- ].bytesList.value[0] = btoa(
- this.$.dash.partialDepPlotEligibleFeatures[
- featureMapping[e.detail.feature_name]
- ].samples[i]
- );
- }
- examples.push(ex);
- }
- }
- const PRED_SIZE = 2;
- const BATCH_SIZE = 128;
- const results = [];
- const predValuesList = [];
- for (let i = 0; i < examples.length; i += BATCH_SIZE) {
- let tlist = [];
- for (
- let idx = i;
- idx < Math.min(i + BATCH_SIZE, examples.length);
- idx++
- ) {
- tlist.push(this.convertExToTensor(examples[idx]));
- }
- const tconcat = tf.concat(tlist);
- tlist.forEach((tensor) => tensor.dispose());
- const input = tconcat.reshape([tlist.length, 104]);
- const res = this.model.predict(input, {batchSize: BATCH_SIZE});
- const vals = await res.data();
- predValuesList.push(vals);
- input.dispose();
- res.dispose();
- tconcat.dispose();
- }
- const predSize = predValuesList.reduce((a, b) => a + b.length, 0);
- const predValues = new Float32Array(predSize);
- let curIdx = 0;
- for (let i = 0; i < predValuesList.length; i++) {
- predValues.set(predValuesList[i], curIdx);
- curIdx += predValuesList[i].length;
- }
- for (let trialIdx = 0; trialIdx < numTrials; trialIdx++) {
- const startingPredIdx =
- trialIdx * exampleIndices.length * PRED_SIZE;
- const scores = [];
- for (
- let exampleIdx = 0;
- exampleIdx < exampleIndices.length;
- exampleIdx++
- ) {
- scores.push(
- predValues[startingPredIdx + exampleIdx * PRED_SIZE]
- );
+ const data = await this.createPdPlotData(
+ e.detail.feature_name,
+ e.detail.example_index,
+ +e.detail.x_min,
+ +e.detail.x_max
+ );
+ let isNum = false;
+ for (let entry of this.$.dash.partialDepPlotEligibleFeatures) {
+ if (entry.name == e.detail.feature_name && !entry.samples) {
+ isNum = true;
+ break;
}
- const score =
- scores.reduce((prev, cur) => prev + cur, 0) / scores.length;
- const ex = examples[trialIdx * exampleIndices.length];
- const step = isNum
- ? ex.features.feature[e.detail.feature_name].floatList
- .value[0]
- : [
- atob(
- ex.features.feature[e.detail.feature_name].bytesList
- .value[0]
- ),
- ];
- results.push({step: step, scalar: score});
}
-
this.$.dash.makeChartForFeature(
isNum ? 'numeric' : 'categorical',
e.detail.feature_name,
- [[{'1': results}]]
+ data
);
};
setTimeout(method, 50);
});
+ this.$.dash.addEventListener('sort-eligible-features', async (e) => {
+ const method = async () => {
+ const chartData = {};
+ const list = this.$.dash.partialDepPlotEligibleFeatures;
+ for (let feat of list) {
+ chartData[feat.name] = await this.createPdPlotData(
+ feat.name,
+ e.detail.example_index,
+ +feat.observedMin,
+ +feat.observedMax
+ );
+ }
+ for (let feat of list) {
+ const charts = chartData[feat.name];
+ let maxMeasure = 0;
+ const isNum = feat.samples == null;
+ for (let models of charts) {
+ for (let chart of models) {
+ for (let key in chart) {
+ const series = chart[key];
+ let measure = 0;
+ if (isNum) {
+ for (let i = 0; i < series.length - 1; i++) {
+ measure += Math.abs(
+ series[i].scalar - series[i + 1].scalar
+ );
+ }
+ } else {
+ let minY = Infinity;
+ let maxY = -Infinity;
+ for (let i = 0; i < series.length; i++) {
+ const val = series[i].scalar;
+ if (val < minY) {
+ minY = val;
+ }
+ if (val > maxY) {
+ maxY = val;
+ }
+ }
+ measure = maxY - minY;
+ }
+ if (measure > maxMeasure) {
+ maxMeasure = measure;
+ }
+ }
+ }
+ }
+ feat.interestingness = maxMeasure;
+ }
+ this.$.dash.partialDepPlotEligibleFeatures = [];
+ list.sort((a, b) => b.interestingness - a.interestingness);
+ this.$.dash.partialDepPlotEligibleFeatures = list;
+ };
+ setTimeout(method, 50);
+ });
requestAnimationFrame(() => {
this.$.dash.inferClicked_();
this.$.dash.selectedLabelFeature = 'over_50k';
});
});
},
+ createPdPlotData: async function(featureName, exampleIndex, xMin, xMax) {
+ const examples = [];
+ let feat = null;
+ for (let entry of this.$.dash.partialDepPlotEligibleFeatures) {
+ if (entry.name == featureName) {
+ feat = entry;
+ break;
+ }
+ }
+ const isNum = !feat.samples;
+ const numTrials = isNum ? 10 : feat.samples.length;
+ const exampleIndices =
+ exampleIndex == -1
+ ? [...Array(this.data.length).keys()]
+ : [exampleIndex];
+ for (let i = 0; i < numTrials; i++) {
+ for (let idx = 0; idx < exampleIndices.length; idx++) {
+ const ex = JSON.parse(
+ JSON.stringify(this.data[exampleIndices[idx]])
+ );
+ if (isNum) {
+ ex.features.feature[featureName].floatList.value[0] =
+ xMin + i * ((1 / (numTrials - 1)) * (xMax - xMin));
+ } else {
+ ex.features.feature[featureName].bytesList.value[0] = btoa(
+ feat.samples[i]
+ );
+ }
+ examples.push(ex);
+ }
+ }
+ const PRED_SIZE = 2;
+ const BATCH_SIZE = 128;
+ const results = [];
+ const predValuesList = [];
+ for (let i = 0; i < examples.length; i += BATCH_SIZE) {
+ let tlist = [];
+ for (
+ let idx = i;
+ idx < Math.min(i + BATCH_SIZE, examples.length);
+ idx++
+ ) {
+ tlist.push(this.convertExToTensor(examples[idx]));
+ }
+ const tconcat = tf.concat(tlist);
+ tlist.forEach((tensor) => tensor.dispose());
+ const input = tconcat.reshape([tlist.length, 104]);
+ const res = this.model.predict(input, {batchSize: BATCH_SIZE});
+ const vals = await res.data();
+ predValuesList.push(vals);
+ input.dispose();
+ res.dispose();
+ tconcat.dispose();
+ }
+ const predSize = predValuesList.reduce((a, b) => a + b.length, 0);
+ const predValues = new Float32Array(predSize);
+ let curIdx = 0;
+ for (let i = 0; i < predValuesList.length; i++) {
+ predValues.set(predValuesList[i], curIdx);
+ curIdx += predValuesList[i].length;
+ }
+ for (let trialIdx = 0; trialIdx < numTrials; trialIdx++) {
+ const startingPredIdx = trialIdx * exampleIndices.length * PRED_SIZE;
+ const scores = [];
+ for (
+ let exampleIdx = 0;
+ exampleIdx < exampleIndices.length;
+ exampleIdx++
+ ) {
+ scores.push(predValues[startingPredIdx + exampleIdx * PRED_SIZE]);
+ }
+ const score =
+ scores.reduce((prev, cur) => prev + cur, 0) / scores.length;
+ const ex = examples[trialIdx * exampleIndices.length];
+ const step = isNum
+ ? ex.features.feature[featureName].floatList.value[0]
+ : [atob(ex.features.feature[featureName].bytesList.value[0])];
+ results.push({step: step, scalar: score});
+ }
+ return [[{'1': results}]];
+ },
convertExToTensor: function(ex) {
const vals = [];
diff --git a/tensorboard/plugins/interactive_inference/tf_interactive_inference_dashboard/demo/tf-interactive-inference-iris-demo.html b/tensorboard/plugins/interactive_inference/tf_interactive_inference_dashboard/demo/tf-interactive-inference-iris-demo.html
index eb3ca36a1e..b116b7bc5b 100644
--- a/tensorboard/plugins/interactive_inference/tf_interactive_inference_dashboard/demo/tf-interactive-inference-iris-demo.html
+++ b/tensorboard/plugins/interactive_inference/tf_interactive_inference_dashboard/demo/tf-interactive-inference-iris-demo.html
@@ -133,134 +133,182 @@
});
this.$.dash.addEventListener('infer-mutants', async (e) => {
const method = async () => {
- const examples = [];
- const featureMapping = {
- 'sepal-length': 0,
- 'sepal-width': 1,
- 'petal-length': 2,
- 'petal-width': 3,
- };
- const xMin = +e.detail.x_min;
- const xMax = +e.detail.x_max;
- const isNum = !this.$.dash.partialDepPlotEligibleFeatures[
- featureMapping[e.detail.feature_name]
- ].samples;
- const numTrials = isNum
- ? 10
- : this.$.dash.partialDepPlotEligibleFeatures[
- featureMapping[e.detail.feature_name]
- ].samples.length;
- const exampleIndices =
- e.detail.example_index == -1
- ? [...Array(this.data.length).keys()]
- : [e.detail.example_index];
- for (let i = 0; i < numTrials; i++) {
- for (let idx = 0; idx < exampleIndices.length; idx++) {
- const ex = JSON.parse(
- JSON.stringify(this.data[exampleIndices[idx]])
- );
- if (isNum) {
- ex.features.feature[
- e.detail.feature_name
- ].floatList.value[0] =
- xMin + i * ((1 / (numTrials - 1)) * (xMax - xMin));
- } else {
- ex.features.feature[
- e.detail.feature_name
- ].bytesList.value[0] = btoa(
- this.$.dash.partialDepPlotEligibleFeatures[
- featureMapping[e.detail.feature_name]
- ].samples[i]
- );
- }
- examples.push(ex);
- }
- }
- const PRED_SIZE = 3;
- const BATCH_SIZE = 128;
- const results = [[], [], []];
- const predValuesList = [];
- for (let i = 0; i < examples.length; i += BATCH_SIZE) {
- let tlist = [];
- for (
- let idx = i;
- idx < Math.min(i + BATCH_SIZE, examples.length);
- idx++
- ) {
- tlist.push(this.convertExToTensor(examples[idx]));
- }
- const tconcat = tf.concat(tlist);
- tlist.forEach((tensor) => tensor.dispose());
- const input = tconcat.reshape([tlist.length, 4]);
- const res = this.model.predict(input, {batchSize: BATCH_SIZE});
- const vals = await res.data();
- predValuesList.push(vals);
- input.dispose();
- res.dispose();
- tconcat.dispose();
- }
- const predSize = predValuesList.reduce((a, b) => a + b.length, 0);
- const predValues = new Float32Array(predSize);
- let curIdx = 0;
- for (let i = 0; i < predValuesList.length; i++) {
- predValues.set(predValuesList[i], curIdx);
- curIdx += predValuesList[i].length;
- }
- for (let trialIdx = 0; trialIdx < numTrials; trialIdx++) {
- const startingPredIdx =
- trialIdx * exampleIndices.length * PRED_SIZE;
- const scores = [[], [], []];
- for (
- let exampleIdx = 0;
- exampleIdx < exampleIndices.length;
- exampleIdx++
- ) {
- scores[0].push(
- predValues[startingPredIdx + exampleIdx * PRED_SIZE]
- );
- scores[1].push(
- predValues[startingPredIdx + exampleIdx * PRED_SIZE + 1]
- );
- scores[2].push(
- predValues[startingPredIdx + exampleIdx * PRED_SIZE + 2]
- );
+ const data = await this.createPdPlotData(
+ e.detail.feature_name,
+ e.detail.example_index,
+ +e.detail.x_min,
+ +e.detail.x_max
+ );
+ let isNum = false;
+ for (let entry of this.$.dash.partialDepPlotEligibleFeatures) {
+ if (entry.name == e.detail.feature_name && !entry.samples) {
+ isNum = true;
+ break;
}
- const score = [
- scores[0].reduce((prev, cur) => prev + cur, 0) /
- scores[0].length,
- scores[1].reduce((prev, cur) => prev + cur, 0) /
- scores[1].length,
- scores[2].reduce((prev, cur) => prev + cur, 0) /
- scores[2].length,
- ];
- const ex = examples[trialIdx * exampleIndices.length];
- const step = isNum
- ? ex.features.feature[e.detail.feature_name].floatList
- .value[0]
- : [
- atob(
- ex.features.feature[e.detail.feature_name].bytesList
- .value[0]
- ),
- ];
- results[0].push({step: step, scalar: score[0]});
- results[1].push({step: step, scalar: score[1]});
- results[2].push({step: step, scalar: score[2]});
}
this.$.dash.makeChartForFeature(
isNum ? 'numeric' : 'categorical',
e.detail.feature_name,
- [[{'0': results[0], '1': results[1], '2': results[2]}]]
+ data
);
};
setTimeout(method, 50);
});
+ this.$.dash.addEventListener('sort-eligible-features', async (e) => {
+ const method = async () => {
+ const chartData = {};
+ const list = this.$.dash.partialDepPlotEligibleFeatures;
+ for (let feat of list) {
+ chartData[feat.name] = await this.createPdPlotData(
+ feat.name,
+ e.detail.example_index,
+ +feat.observedMin,
+ +feat.observedMax
+ );
+ }
+ for (let feat of list) {
+ const charts = chartData[feat.name];
+ let maxMeasure = 0;
+ const isNum = feat.samples == null;
+ for (let models of charts) {
+ for (let chart of models) {
+ for (let key in chart) {
+ const series = chart[key];
+ let measure = 0;
+ if (isNum) {
+ for (let i = 0; i < series.length - 1; i++) {
+ measure += Math.abs(
+ series[i].scalar - series[i + 1].scalar
+ );
+ }
+ } else {
+ let minY = Infinity;
+ let maxY = -Infinity;
+ for (let i = 0; i < series.length; i++) {
+ const val = series[i].scalar;
+ if (val < minY) {
+ minY = val;
+ }
+ if (val > maxY) {
+ maxY = val;
+ }
+ }
+ measure = maxY - minY;
+ }
+ if (measure > maxMeasure) {
+ maxMeasure = measure;
+ }
+ }
+ }
+ }
+ feat.interestingness = maxMeasure;
+ }
+ this.$.dash.partialDepPlotEligibleFeatures = [];
+ list.sort((a, b) => b.interestingness - a.interestingness);
+ this.$.dash.partialDepPlotEligibleFeatures = list;
+ };
+ setTimeout(method, 50);
+ });
requestAnimationFrame(() => {
this.$.dash.inferClicked_();
this.$.dash.selectedLabelFeature = 'class';
});
});
},
+ createPdPlotData: async function(featureName, exampleIndex, xMin, xMax) {
+ const examples = [];
+ let feat = null;
+ for (let entry of this.$.dash.partialDepPlotEligibleFeatures) {
+ if (entry.name == featureName) {
+ feat = entry;
+ break;
+ }
+ }
+ const isNum = !feat.samples;
+ const numTrials = isNum ? 10 : feat.samples.length;
+ const exampleIndices =
+ exampleIndex == -1
+ ? [...Array(this.data.length).keys()]
+ : [exampleIndex];
+ for (let i = 0; i < numTrials; i++) {
+ for (let idx = 0; idx < exampleIndices.length; idx++) {
+ const ex = JSON.parse(
+ JSON.stringify(this.data[exampleIndices[idx]])
+ );
+ if (isNum) {
+ ex.features.feature[featureName].floatList.value[0] =
+ xMin + i * ((1 / (numTrials - 1)) * (xMax - xMin));
+ } else {
+ ex.features.feature[featureName].bytesList.value[0] = btoa(
+ feat.samples[i]
+ );
+ }
+ examples.push(ex);
+ }
+ }
+ const PRED_SIZE = 3;
+ const BATCH_SIZE = 128;
+ const results = [[], [], []];
+ const predValuesList = [];
+ for (let i = 0; i < examples.length; i += BATCH_SIZE) {
+ let tlist = [];
+ for (
+ let idx = i;
+ idx < Math.min(i + BATCH_SIZE, examples.length);
+ idx++
+ ) {
+ tlist.push(this.convertExToTensor(examples[idx]));
+ }
+ const tconcat = tf.concat(tlist);
+ tlist.forEach((tensor) => tensor.dispose());
+ const input = tconcat.reshape([tlist.length, 4]);
+ const res = this.model.predict(input, {batchSize: BATCH_SIZE});
+ const vals = await res.data();
+ predValuesList.push(vals);
+ input.dispose();
+ res.dispose();
+ tconcat.dispose();
+ }
+ const predSize = predValuesList.reduce((a, b) => a + b.length, 0);
+ const predValues = new Float32Array(predSize);
+ let curIdx = 0;
+ for (let i = 0; i < predValuesList.length; i++) {
+ predValues.set(predValuesList[i], curIdx);
+ curIdx += predValuesList[i].length;
+ }
+ for (let trialIdx = 0; trialIdx < numTrials; trialIdx++) {
+ const startingPredIdx = trialIdx * exampleIndices.length * PRED_SIZE;
+ const scores = [[], [], []];
+ for (
+ let exampleIdx = 0;
+ exampleIdx < exampleIndices.length;
+ exampleIdx++
+ ) {
+ scores[0].push(
+ predValues[startingPredIdx + exampleIdx * PRED_SIZE]
+ );
+ scores[1].push(
+ predValues[startingPredIdx + exampleIdx * PRED_SIZE + 1]
+ );
+ scores[2].push(
+ predValues[startingPredIdx + exampleIdx * PRED_SIZE + 2]
+ );
+ }
+ const score = [
+ scores[0].reduce((prev, cur) => prev + cur, 0) / scores[0].length,
+ scores[1].reduce((prev, cur) => prev + cur, 0) / scores[1].length,
+ scores[2].reduce((prev, cur) => prev + cur, 0) / scores[2].length,
+ ];
+ const ex = examples[trialIdx * exampleIndices.length];
+ const step = isNum
+ ? ex.features.feature[featureName].floatList.value[0]
+ : [atob(ex.features.feature[featureName].bytesList.value[0])];
+ results[0].push({step: step, scalar: score[0]});
+ results[1].push({step: step, scalar: score[1]});
+ results[2].push({step: step, scalar: score[2]});
+ }
+ return [[{'0': results[0], '1': results[1], '2': results[2]}]];
+ },
convertExToTensor: function(ex) {
const vals = [];
diff --git a/tensorboard/plugins/interactive_inference/tf_interactive_inference_dashboard/demo/tf-interactive-inference-multi-demo.html b/tensorboard/plugins/interactive_inference/tf_interactive_inference_dashboard/demo/tf-interactive-inference-multi-demo.html
index 92af086da7..d14ae27577 100644
--- a/tensorboard/plugins/interactive_inference/tf_interactive_inference_dashboard/demo/tf-interactive-inference-multi-demo.html
+++ b/tensorboard/plugins/interactive_inference/tf_interactive_inference_dashboard/demo/tf-interactive-inference-multi-demo.html
@@ -355,147 +355,182 @@
});
this.$.dash.addEventListener('infer-mutants', async (e) => {
const method = async () => {
- const examples = [];
- const featureMapping = {
- age: 0,
- 'capital-gain': 1,
- 'capital-loss': 2,
- education: 3,
- 'education-num': 4,
- 'hours-per-week': 5,
- 'marital-status': 6,
- 'native-country': 7,
- occupation: 8,
- over_50k: 9,
- race: 10,
- relationship: 11,
- sex: 12,
- workclass: 13,
- };
- const xMin = +e.detail.x_min;
- const xMax = +e.detail.x_max;
- const isNum = !this.$.dash.partialDepPlotEligibleFeatures[
- featureMapping[e.detail.feature_name]
- ].samples;
- const numTrials = isNum
- ? 10
- : this.$.dash.partialDepPlotEligibleFeatures[
- featureMapping[e.detail.feature_name]
- ].samples.length;
- const exampleIndices =
- e.detail.example_index == -1
- ? [...Array(this.data.length).keys()]
- : [e.detail.example_index];
- for (let i = 0; i < numTrials; i++) {
- for (let idx = 0; idx < exampleIndices.length; idx++) {
- const ex = JSON.parse(
- JSON.stringify(this.data[exampleIndices[idx]])
- );
- if (isNum) {
- ex.features.feature[
- e.detail.feature_name
- ].floatList.value[0] =
- xMin + i * ((1 / (numTrials - 1)) * (xMax - xMin));
- } else {
- ex.features.feature[
- e.detail.feature_name
- ].bytesList.value[0] = btoa(
- this.$.dash.partialDepPlotEligibleFeatures[
- featureMapping[e.detail.feature_name]
- ].samples[i]
- );
- }
- examples.push(ex);
- }
- }
- const PRED_SIZE = 2;
- const BATCH_SIZE = 128;
- const results = [];
- const results2 = [];
- const predValuesList = [];
- const predValuesList2 = [];
- for (let i = 0; i < examples.length; i += BATCH_SIZE) {
- let tlist = [];
- for (
- let idx = i;
- idx < Math.min(i + BATCH_SIZE, examples.length);
- idx++
- ) {
- tlist.push(this.convertExToTensor(examples[idx]));
- }
- const tconcat = tf.concat(tlist);
- tlist.forEach((tensor) => tensor.dispose());
- const input = tconcat.reshape([tlist.length, 104]);
- const res = this.model.predict(input, {batchSize: BATCH_SIZE});
- const res2 = this.model2.predict(input, {
- batchSize: BATCH_SIZE,
- });
- const vals = await res.data();
- const vals2 = await res2.data();
- predValuesList.push(vals);
- predValuesList2.push(vals2);
- input.dispose();
- res.dispose();
- res2.dispose();
- tconcat.dispose();
- }
- const predSize = predValuesList.reduce((a, b) => a + b.length, 0);
- const predValues = new Float32Array(predSize);
- const predValues2 = new Float32Array(predSize);
- let curIdx = 0;
- for (let i = 0; i < predValuesList.length; i++) {
- predValues.set(predValuesList[i], curIdx);
- predValues2.set(predValuesList2[i], curIdx);
- curIdx += predValuesList[i].length;
- }
- for (let trialIdx = 0; trialIdx < numTrials; trialIdx++) {
- const startingPredIdx =
- trialIdx * exampleIndices.length * PRED_SIZE;
- const scores = [];
- const scores2 = [];
- for (
- let exampleIdx = 0;
- exampleIdx < exampleIndices.length;
- exampleIdx++
- ) {
- scores.push(
- predValues[startingPredIdx + exampleIdx * PRED_SIZE]
- );
- scores2.push(
- predValues2[startingPredIdx + exampleIdx * PRED_SIZE]
- );
+ // The inline mutation/inference logic above has been extracted into
+ // the shared createPdPlotData helper so the sort-eligible-features
+ // handler can reuse it; this listener now only delegates and plots.
+ const data = await this.createPdPlotData(
+ e.detail.feature_name,
+ e.detail.example_index,
+ +e.detail.x_min,
+ +e.detail.x_max
+ );
+ // A feature entry with no 'samples' list is treated as numeric;
+ // categorical entries carry their sample values.
+ // NOTE(review): assumes 'samples' is absent (falsy) for numeric
+ // features rather than present-but-empty — confirm upstream.
+ let isNum = false;
+ for (let entry of this.$.dash.partialDepPlotEligibleFeatures) {
+ if (entry.name == e.detail.feature_name && !entry.samples) {
+ isNum = true;
+ break;
}
- const score =
- scores.reduce((prev, cur) => prev + cur, 0) / scores.length;
- const score2 =
- scores2.reduce((prev, cur) => prev + cur, 0) / scores.length;
- const ex = examples[trialIdx * exampleIndices.length];
- const step = isNum
- ? ex.features.feature[e.detail.feature_name].floatList
- .value[0]
- : [
- atob(
- ex.features.feature[e.detail.feature_name].bytesList
- .value[0]
- ),
- ];
- results.push({step: step, scalar: score});
- results2.push({step: step, scalar: score2});
}
this.$.dash.makeChartForFeature(
isNum ? 'numeric' : 'categorical',
e.detail.feature_name,
- [[{'1': results}, {'1': results2}]]
+ data
);
};
setTimeout(method, 50);
});
+ // Ranks every partial-dependence-plot-eligible feature by how much the
+ // model outputs vary across its mutated values ("interestingness"),
+ // then re-publishes the list sorted most-interesting-first.
+ this.$.dash.addEventListener('sort-eligible-features', async (e) => {
+ const method = async () => {
+ const chartData = {};
+ const list = this.$.dash.partialDepPlotEligibleFeatures;
+ // Run partial-dependence inference once per feature over its
+ // observed value range. Awaited sequentially — each call batches
+ // its own model predictions.
+ for (let feat of list) {
+ chartData[feat.name] = await this.createPdPlotData(
+ feat.name,
+ e.detail.example_index,
+ +feat.observedMin,
+ +feat.observedMax
+ );
+ }
+ for (let feat of list) {
+ const charts = chartData[feat.name];
+ let maxMeasure = 0;
+ const isNum = feat.samples == null;
+ // chartData[name] is a nested list of per-model chart objects
+ // keyed by output-class index; take the max measure over all of
+ // them as the feature's interestingness.
+ for (let models of charts) {
+ for (let chart of models) {
+ for (let key in chart) {
+ const series = chart[key];
+ let measure = 0;
+ if (isNum) {
+ // Numeric features: total variation (sum of absolute
+ // deltas between consecutive plot points).
+ for (let i = 0; i < series.length - 1; i++) {
+ measure += Math.abs(
+ series[i].scalar - series[i + 1].scalar
+ );
+ }
+ } else {
+ // Categorical features: range (max - min) of the scores,
+ // since point order carries no meaning.
+ let minY = Infinity;
+ let maxY = -Infinity;
+ for (let i = 0; i < series.length; i++) {
+ const val = series[i].scalar;
+ if (val < minY) {
+ minY = val;
+ }
+ if (val > maxY) {
+ maxY = val;
+ }
+ }
+ measure = maxY - minY;
+ }
+ if (measure > maxMeasure) {
+ maxMeasure = measure;
+ }
+ }
+ }
+ }
+ feat.interestingness = maxMeasure;
+ }
+ // Clear then reassign so Polymer observes the array change and
+ // re-renders the feature list in the new order.
+ this.$.dash.partialDepPlotEligibleFeatures = [];
+ list.sort((a, b) => b.interestingness - a.interestingness);
+ this.$.dash.partialDepPlotEligibleFeatures = list;
+ };
+ setTimeout(method, 50);
+ });
requestAnimationFrame(() => {
this.$.dash.inferClicked_();
this.$.dash.selectedLabelFeature = 'over_50k';
});
});
},
+ createPdPlotData: async function(featureName, exampleIndex, xMin, xMax) {
+ const examples = [];
+ let feat = null;
+ for (let entry of this.$.dash.partialDepPlotEligibleFeatures) {
+ if (entry.name == featureName) {
+ feat = entry;
+ break;
+ }
+ }
+ const isNum = !feat.samples;
+ const numTrials = isNum ? 10 : feat.samples.length;
+ const exampleIndices =
+ exampleIndex == -1
+ ? [...Array(this.data.length).keys()]
+ : [exampleIndex];
+ for (let i = 0; i < numTrials; i++) {
+ for (let idx = 0; idx < exampleIndices.length; idx++) {
+ const ex = JSON.parse(
+ JSON.stringify(this.data[exampleIndices[idx]])
+ );
+ if (isNum) {
+ ex.features.feature[featureName].floatList.value[0] =
+ xMin + i * ((1 / (numTrials - 1)) * (xMax - xMin));
+ } else {
+ ex.features.feature[featureName].bytesList.value[0] = btoa(
+ feat.samples[i]
+ );
+ }
+ examples.push(ex);
+ }
+ }
+ const PRED_SIZE = 2;
+ const BATCH_SIZE = 128;
+ const results = [];
+ const results2 = [];
+ const predValuesList = [];
+ const predValuesList2 = [];
+ for (let i = 0; i < examples.length; i += BATCH_SIZE) {
+ let tlist = [];
+ for (
+ let idx = i;
+ idx < Math.min(i + BATCH_SIZE, examples.length);
+ idx++
+ ) {
+ tlist.push(this.convertExToTensor(examples[idx]));
+ }
+ const tconcat = tf.concat(tlist);
+ tlist.forEach((tensor) => tensor.dispose());
+ const input = tconcat.reshape([tlist.length, 104]);
+ const res = this.model.predict(input, {batchSize: BATCH_SIZE});
+ const res2 = this.model2.predict(input, {batchSize: BATCH_SIZE});
+ const vals = await res.data();
+ const vals2 = await res2.data();
+ predValuesList.push(vals);
+ predValuesList2.push(vals2);
+ input.dispose();
+ res.dispose();
+ res2.dispose();
+ tconcat.dispose();
+ }
+ const predSize = predValuesList.reduce((a, b) => a + b.length, 0);
+ const predValues = new Float32Array(predSize);
+ const predValues2 = new Float32Array(predSize);
+ let curIdx = 0;
+ for (let i = 0; i < predValuesList.length; i++) {
+ predValues.set(predValuesList[i], curIdx);
+ predValues2.set(predValuesList2[i], curIdx);
+ curIdx += predValuesList[i].length;
+ }
+ for (let trialIdx = 0; trialIdx < numTrials; trialIdx++) {
+ const startingPredIdx = trialIdx * exampleIndices.length * PRED_SIZE;
+ const scores = [];
+ const scores2 = [];
+ for (
+ let exampleIdx = 0;
+ exampleIdx < exampleIndices.length;
+ exampleIdx++
+ ) {
+ scores.push(predValues[startingPredIdx + exampleIdx * PRED_SIZE]);
+ scores2.push(predValues2[startingPredIdx + exampleIdx * PRED_SIZE]);
+ }
+ const score =
+ scores.reduce((prev, cur) => prev + cur, 0) / scores.length;
+ const score2 =
+ scores2.reduce((prev, cur) => prev + cur, 0) / scores.length;
+ const ex = examples[trialIdx * exampleIndices.length];
+ const step = isNum
+ ? ex.features.feature[featureName].floatList.value[0]
+ : [atob(ex.features.feature[featureName].bytesList.value[0])];
+ results.push({step: step, scalar: score});
+ results2.push({step: step, scalar: score2});
+ }
+ return [[{'1': results}, {'1': results2}]];
+ },
convertExToTensor: function(ex) {
const vals = [];
diff --git a/tensorboard/plugins/interactive_inference/tf_interactive_inference_dashboard/tf-interactive-inference-dashboard.html b/tensorboard/plugins/interactive_inference/tf_interactive_inference_dashboard/tf-interactive-inference-dashboard.html
index ef42aa3a7e..90b514d8ce 100644
--- a/tensorboard/plugins/interactive_inference/tf_interactive_inference_dashboard/tf-interactive-inference-dashboard.html
+++ b/tensorboard/plugins/interactive_inference/tf_interactive_inference_dashboard/tf-interactive-inference-dashboard.html
@@ -1087,6 +1087,10 @@
border-bottom: solid 1px var(--wit-color-gray300);
}
+ .perf-table-entry.perf-table-entry-trivial {
+ background: #f4f4f4;
+ }
+
.perf-table-entry-expanded {
display: flex;
flex-wrap: wrap;
@@ -1249,6 +1253,15 @@
--paper-toggle-button-checked-button-color: white;
--paper-toggle-button-label-color: #3c4043;
}
+ .button-and-spinner-holder {
+ margin-top: 4px;
+ position: relative;
+ }
+ .sort-spinner {
+ position: absolute;
+ top: 8px;
+ left: 8px;
+ }