Skip to content

Commit be9a25f

Browse files
committed
llama : remove unicode.h from llama-model.cpp
ggml-ci
1 parent 1502dc6 commit be9a25f

File tree

1 file changed

+4
-5
lines changed

1 file changed

+4
-5
lines changed

src/llama-model.cpp

+4-5
Original file line number | Diff line number | Diff line change
@@ -6,8 +6,6 @@
66

77
#include "ggml-cpp.h"
88

9-
#include "unicode.h" // TODO: remove
10-
119
#include <algorithm>
1210
#include <cassert>
1311
#include <cstring>
@@ -1268,6 +1266,7 @@ void llama_model::load_vocab(llama_model_loader & ml) {
12681266
vocab.n_vocab = 0;
12691267
LLAMA_LOG_WARN("%s: there is no vocab_size in metadata, vocab.n_vocab will be set to %u\n", __func__, vocab.n_vocab);
12701268
}
1269+
12711270
return;
12721271
}
12731272

@@ -1305,7 +1304,7 @@ void llama_model::load_vocab(llama_model_loader & ml) {
13051304
const int n_merges = gguf_get_arr_n(ctx, merges_keyidx);
13061305
for (int i = 0; i < n_merges; i++) {
13071306
const std::string word = gguf_get_arr_str(ctx, merges_keyidx, i);
1308-
GGML_ASSERT(unicode_cpts_from_utf8(word).size() > 0);
1307+
//GGML_ASSERT(unicode_cpts_from_utf8(word).size() > 0);
13091308

13101309
std::string first;
13111310
std::string second;
@@ -1899,8 +1898,8 @@ void llama_model::load_vocab(llama_model_loader & ml) {
18991898
//NOTE: Per token attributes are missing from the GGUF file.
19001899
//TODO: Extract attributes from GGUF file.
19011900
{
1902-
auto _contains_any = [] (const std::string &str, const std::vector<std::string> &substrs) -> bool {
1903-
for (auto substr : substrs) {
1901+
auto _contains_any = [] (const std::string & str, const std::vector<std::string> & substrs) -> bool {
1902+
for (const auto & substr : substrs) {
19041903
if (str.find(substr) < std::string::npos) {
19051904
return true;
19061905
}

0 commit comments

Comments (0)