Skip to content

Commit 3a716b4

Browse files
authored
Fix for #3454 (#3455)
Fix: `sentencepiece` tokenizers with added tokens failed with an incorrect assertion
1 parent 1faaae8 commit 3a716b4

File tree

1 file changed

+6
-2
lines changed

1 file changed

+6
-2
lines changed

llama.cpp

+6-2
Original file line number | Diff line number | Diff line change
@@ -8200,7 +8200,9 @@ int llama_token_to_piece(const struct llama_model * model, llama_token token, ch
             buf[0] = llama_token_to_byte(model->vocab, token);
             return 1;
         } else {
-            GGML_ASSERT(false);
+            // TODO: for now we accept all unsupported token types,
+            // suppressing them like CONTROL tokens.
+            // GGML_ASSERT(false);
         }
         break;
     }
@@ -8216,7 +8218,9 @@ int llama_token_to_piece(const struct llama_model * model, llama_token token, ch
         } else if (llama_is_control_token(model->vocab, token)) {
             ;
         } else {
-            GGML_ASSERT(false);
+            // TODO: for now we accept all unsupported token types,
+            // suppressing them like CONTROL tokens.
+            // GGML_ASSERT(false);
         }
         break;
     }

0 commit comments

Comments (0)