fix the error in interpreter when input is nd seq
zegnog committed Jul 5, 2021
1 parent 5378533 commit 2415cb9
Showing 3 changed files with 9 additions and 9 deletions.
@@ -30,9 +30,9 @@ def saliency_interpret_from_json(self, inputs: JsonDict) -> JsonDict:
             # Normalize results
             for key, grad in grads.items():
                 # The [0] here is undo-ing the batching that happens in get_gradients.
-                embedding_grad = numpy.sum(grad[0], axis=1)
-                norm = numpy.linalg.norm(embedding_grad, ord=1)
-                normalized_grad = [math.fabs(e) / norm for e in embedding_grad]
+                embedding_grad = numpy.sum(grad[0], axis=-1)
+                norm = numpy.linalg.norm(embedding_grad, ord=1, keepdims=True)
+                normalized_grad = embedding_grad / norm
                 grads[key] = normalized_grad
 
             instances_with_grads["instance_" + str(idx + 1)] = grads
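All three hunks make the same two changes: the per-token score is now obtained by summing over the last axis instead of axis 1, and the normalization stays in numpy rather than a Python list comprehension. The sketch below is not part of the commit; the shapes are assumed for illustration, to show why axis=1 only works when the instance is a flat token sequence.

    import numpy

    # grad[0] for a flat token sequence: (seq_len, embed_dim)
    flat_grad = numpy.random.rand(7, 300)
    # grad[0] for an n-d sequence such as a list of spans: (num_spans, seq_len, embed_dim)
    nd_grad = numpy.random.rand(4, 7, 300)

    # axis=1 happens to be the embedding axis only in the 2-d case.
    print(numpy.sum(flat_grad, axis=1).shape)   # (7,)      one score per token
    print(numpy.sum(nd_grad, axis=1).shape)     # (4, 300)  summed over tokens instead
    # axis=-1 always collapses the trailing embedding dimension.
    print(numpy.sum(flat_grad, axis=-1).shape)  # (7,)
    print(numpy.sum(nd_grad, axis=-1).shape)    # (4, 7)    one score per token per span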
6 changes: 3 additions & 3 deletions allennlp/interpret/saliency_interpreters/simple_gradient.py
@@ -48,9 +48,9 @@ def saliency_interpret_from_json(self, inputs: JsonDict) -> JsonDict:
                 # gradient and its respective embedding.
                 input_idx = int(key[-1]) - 1
                 # The [0] here is undo-ing the batching that happens in get_gradients.
-                emb_grad = numpy.sum(grad[0] * embeddings_list[input_idx][0], axis=1)
-                norm = numpy.linalg.norm(emb_grad, ord=1)
-                normalized_grad = [math.fabs(e) / norm for e in emb_grad]
+                emb_grad = numpy.sum(grad[0] * embeddings_list[input_idx][0], axis=-1)
+                norm = numpy.linalg.norm(emb_grad, ord=1, keepdims=True)
+                normalized_grad = emb_grad / norm
                 grads[key] = normalized_grad
 
             instances_with_grads["instance_" + str(idx + 1)] = grads
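In simple_gradient.py the score is the dot product between each token's gradient and its own embedding, so the sum over the embedding axis has to survive any number of leading sequence dimensions. A minimal sketch with assumed shapes (the array names are illustrative, not the interpreter's variables):

    import numpy

    grad = numpy.random.rand(4, 7, 300)        # assumed (num_spans, seq_len, embed_dim)
    embeddings = numpy.random.rand(4, 7, 300)  # matching embeddings

    # Dot product of gradient and embedding per token, whatever the leading shape is.
    emb_grad = numpy.sum(grad * embeddings, axis=-1)          # (4, 7)
    # A single norm value kept as an array so the division broadcasts.
    norm = numpy.linalg.norm(emb_grad, ord=1, keepdims=True)  # shape (1, 1)
    normalized_grad = emb_grad / norm
    print(normalized_grad.shape)                              # (4, 7)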
6 changes: 3 additions & 3 deletions allennlp/interpret/saliency_interpreters/smooth_gradient.py
@@ -39,9 +39,9 @@ def saliency_interpret_from_json(self, inputs: JsonDict) -> JsonDict:
                 # Fine for now, but should fix for consistency.
 
                 # The [0] here is undo-ing the batching that happens in get_gradients.
-                embedding_grad = numpy.sum(grad[0], axis=1)
-                norm = numpy.linalg.norm(embedding_grad, ord=1)
-                normalized_grad = [math.fabs(e) / norm for e in embedding_grad]
+                embedding_grad = numpy.sum(grad[0], axis=-1)
+                norm = numpy.linalg.norm(embedding_grad, ord=1, keepdims=True)
+                normalized_grad = embedding_grad / norm
                 grads[key] = normalized_grad
 
             instances_with_grads["instance_" + str(idx + 1)] = grads
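The removed list comprehension is the part that actually raised: iterating over a multi-dimensional embedding_grad yields rows, and math.fabs cannot convert a row to a scalar. A small sketch of the failure and of the committed replacement, with an assumed shape:

    import math
    import numpy

    embedding_grad = numpy.random.rand(4, 7)  # assumed per-token scores for 4 spans

    try:
        norm = numpy.linalg.norm(embedding_grad, ord=1)
        [math.fabs(e) / norm for e in embedding_grad]  # e is a row of 7 values, not a scalar
    except TypeError as err:
        print(err)  # numpy refuses to convert a size-7 array to a Python scalar

    # The replacement stays in numpy and broadcasts over any shape.
    norm = numpy.linalg.norm(embedding_grad, ord=1, keepdims=True)
    normalized_grad = embedding_grad / norm
    print(normalized_grad.shape)  # (4, 7)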
