Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fixed bug where in masked_lm transformations only subwords were candidates for top_words #417

Merged
merged 19 commits into from
Feb 15, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
80 changes: 79 additions & 1 deletion docs/3recipes/models.md

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions docs/conf.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,7 @@
"nbsphinx",
# Enable .md doc files
"recommonmark",
"sphinx_markdown_tables",
]
autosummary_generate = True

Expand Down
1 change: 1 addition & 0 deletions docs/requirements.txt
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
recommonmark
sphinx-markdown-tables
nbsphinx
sphinx-autobuild
sphinx-rtd-theme
1 change: 1 addition & 0 deletions requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -18,3 +18,4 @@ word2number
num2words
more-itertools
PySocks!=1.5.7,>=1.5.6
sphinx-markdown-tables
6 changes: 3 additions & 3 deletions tests/sample_outputs/run_attack_stanza_pos_tagger.txt
Original file line number Diff line number Diff line change
Expand Up @@ -26,11 +26,11 @@ lovingly photographed in the manner of a golden book sprung to subsistence


--------------------------------------------- Result 2 ---------------------------------------------
Positive (99%) --> Negative (58%)
Positive (99%) --> Negative (91%)

consistently clever and suspenseful .

persistently brainy and suspenseful .
persistently malin and suspenseful .


--------------------------------------------- Result 3 ---------------------------------------------
Expand Down Expand Up @@ -59,5 +59,5 @@ the story gives ample opportunity for large-scale action and suspense , which di
| Attack success rate: | 100.0% |
| Average perturbed word %: | 22.04% |
| Average num. words per input: | 15.5 |
| Avg num queries: | 175.67 |
| Avg num queries: | 174.67 |
+-------------------------------+--------+
77 changes: 77 additions & 0 deletions textattack/models/README.md

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion textattack/shared/utils/install.py
Original file line number Diff line number Diff line change
Expand Up @@ -139,7 +139,7 @@ def set_cache_dir(cache_dir):
def _post_install_if_needed():
"""Runs _post_install if hasn't been run since install."""
# Check for post-install file.
post_install_file_path = path_in_cache("post_install_check_2")
post_install_file_path = path_in_cache("post_install_check_3")
post_install_file_lock_path = post_install_file_path + ".lock"
post_install_file_lock = filelock.FileLock(post_install_file_lock_path)
post_install_file_lock.acquire()
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -113,21 +113,21 @@ def _get_new_words(self, current_text, indices_to_modify):
top_words = []
for _id in ranked_indices:
_id = _id.item()
token = self._lm_tokenizer.convert_ids_to_tokens(_id)
word = self._lm_tokenizer.convert_ids_to_tokens(_id)
if utils.check_if_subword(
token,
word,
self._language_model.config.model_type,
(masked_index == 1),
):
word = utils.strip_BPE_artifacts(
token, self._language_model.config.model_type
word, self._language_model.config.model_type
)
if (
mask_token_probs[_id] >= self.min_confidence
and utils.is_one_word(word)
and not utils.check_if_punctuations(word)
):
top_words.append(word)
if (
mask_token_probs[_id] >= self.min_confidence
and utils.is_one_word(word)
and not utils.check_if_punctuations(word)
):
top_words.append(word)

if (
len(top_words) >= self.max_candidates
Expand Down
18 changes: 9 additions & 9 deletions textattack/transformations/word_merges/word_merge_masked_lm.py
Original file line number Diff line number Diff line change
Expand Up @@ -112,21 +112,21 @@ def _get_merged_words(self, current_text, indices_to_modify):
top_words = []
for _id in ranked_indices:
_id = _id.item()
token = self._lm_tokenizer.convert_ids_to_tokens(_id)
word = self._lm_tokenizer.convert_ids_to_tokens(_id)
if utils.check_if_subword(
token,
word,
self._language_model.config.model_type,
(masked_index == 1),
):
word = utils.strip_BPE_artifacts(
token, self._language_model.config.model_type
word, self._language_model.config.model_type
)
if (
mask_token_probs[_id] >= self.min_confidence
and utils.is_one_word(word)
and not utils.check_if_punctuations(word)
):
top_words.append(word)
if (
mask_token_probs[_id] >= self.min_confidence
and utils.is_one_word(word)
and not utils.check_if_punctuations(word)
):
top_words.append(word)

if (
len(top_words) >= self.max_candidates
Expand Down
18 changes: 9 additions & 9 deletions textattack/transformations/word_swaps/word_swap_masked_lm.py
Original file line number Diff line number Diff line change
Expand Up @@ -136,21 +136,21 @@ def _bae_replacement_words(self, current_text, indices_to_modify):
top_words = []
for _id in ranked_indices:
_id = _id.item()
token = self._lm_tokenizer.convert_ids_to_tokens(_id)
word = self._lm_tokenizer.convert_ids_to_tokens(_id)
if utils.check_if_subword(
token,
word,
self._language_model.config.model_type,
(masked_index == 1),
):
word = utils.strip_BPE_artifacts(
token, self._language_model.config.model_type
word, self._language_model.config.model_type
)
if (
mask_token_probs[_id] >= self.min_confidence
and utils.is_one_word(word)
and not utils.check_if_punctuations(word)
):
top_words.append(word)
if (
mask_token_probs[_id] >= self.min_confidence
and utils.is_one_word(word)
and not utils.check_if_punctuations(word)
):
top_words.append(word)

if (
len(top_words) >= self.max_candidates
Expand Down