From 797088a7cd71eb56373ab6f0f7166aa2b51cdaf1 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Mon, 14 Aug 2023 14:10:21 +0300
Subject: [PATCH] minor : indentation + assert

---
 examples/gguf/gguf.cpp |  21 ++++--
 gguf-llama.cpp         | 167 +++++++++++++++++++++--------------------
 gguf-llama.h           |   2 +-
 3 files changed, 100 insertions(+), 90 deletions(-)

diff --git a/examples/gguf/gguf.cpp b/examples/gguf/gguf.cpp
index 6f454a2047ddd..a8521115ab562 100644
--- a/examples/gguf/gguf.cpp
+++ b/examples/gguf/gguf.cpp
@@ -8,6 +8,12 @@
 #include
 #include
 #include
+
+#undef MIN
+#undef MAX
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+
 /*
 template<typename T>
 static std::string to_string(const T & val) {
@@ -16,6 +22,7 @@ static std::string to_string(const T & val) {
     return ss.str();
 }
 */
+
 void gguf_ex_write_str(std::ofstream & fout, const std::string & val) {
     const int32_t n = val.size();
     fout.write((const char *) &n, sizeof(n));
@@ -377,28 +384,28 @@ bool gguf_ex_read_2(const std::string & fname) {
 
     struct gguf_file file(fname.c_str(), "rb");
     gguf_mmap data_mmap(&file, 0, false);
+
     const int n_tensors = gguf_get_n_tensors(ctx);
 
     for (int i = 0; i < n_tensors; ++i) {
-        const char * name = gguf_get_tensor_name(ctx, i);
-        const size_t offset = gguf_get_data_offset(ctx) + gguf_get_tensor_offset(ctx, i);
+        const char * name   = gguf_get_tensor_name(ctx, i);
+        const size_t offset = gguf_get_data_offset(ctx) + gguf_get_tensor_offset(ctx, i);
+
         struct ggml_tensor * cur = ggml_get_tensor(ctx_data, name);
 
         cur->data = static_cast<char *>(data_mmap.addr) + offset;
 
         // print first 10 elements
-        const float * data   = (const float *) cur->data;
+        const float * data = (const float *) cur->data;
 
         printf("%s data[:10] : ", name);
-
-        for (int j = 0; j < 10; ++j) {
+        for (int j = 0; j < MIN(10, ggml_nelements(cur)); ++j) {
            printf("%f ", data[j]);
         }
-
         printf("\n\n");
     }
 
-fprintf(stdout, "%s: ctx_data size: %zu\n", __func__, ggml_get_mem_size(ctx_data));
+    fprintf(stdout, "%s: ctx_data size: %zu\n", __func__, ggml_get_mem_size(ctx_data));
 
     ggml_free(ctx_data);
     gguf_free(ctx);
diff --git a/gguf-llama.cpp b/gguf-llama.cpp
index 8c9a96c08c047..5529ff36e1c77 100644
--- a/gguf-llama.cpp
+++ b/gguf-llama.cpp
@@ -508,17 +508,16 @@ struct gguf_load_tensors_map {
 
 enum gguf_file_version {
     GGUF_FILE_VERSION_V1 = 1,
-
 };
-
 
 struct gguf_file_loader {
     gguf_file file;
     gguf_context * gguf_ctx;
     gguf_file_version file_version;
     llama_hparams hparams;
     llama_vocab vocab;
-struct ggml_context * ctx_data = NULL;
+
+    struct ggml_context * ctx_data = NULL;
 
     gguf_file_loader(const char * fname, gguf_load_tensors_map & tensors_map)
         : file(fname, "rb") {
@@ -537,7 +536,7 @@ struct ggml_context * ctx_data = NULL;
         read_tensor_metadata(tensors_map);
     }
 
-    uint32_t read_u32(const char * key) {
+    uint32_t read_u32(const char * key) const {
         int i = gguf_find_key(gguf_ctx, key);
         if (i == -1) {
             throw std::runtime_error(format("cannot find param with key %s\n", key));
@@ -546,7 +545,7 @@ struct ggml_context * ctx_data = NULL;
         return gguf_get_val_u32(gguf_ctx, i);
     }
 
-    float read_f32(const char * key) {
+    float read_f32(const char * key) const {
         int i = gguf_find_key(gguf_ctx, key);
         if (i == -1) {
             throw std::runtime_error(format("cannot find param with key %s\n", key));
@@ -555,27 +554,26 @@ struct ggml_context * ctx_data = NULL;
         return gguf_get_val_f32(gguf_ctx, i);
     }
 
-    int read_n_vocab() {
+    int read_n_vocab() const {
         int i = gguf_find_key(gguf_ctx, "tokenizer.ggml.tokens");
 
-            if (i == -1) {
-                throw std::runtime_error("cannot find token list in GGUF file\n");
-            }
+        if (i == -1) {
+            throw std::runtime_error("cannot find token list in GGUF file\n");
+        }
 
-            return gguf_get_arr_n(gguf_ctx, i);
+        return gguf_get_arr_n(gguf_ctx, i);
     }
 
     void read_hparams() {
-        // TODO define keys as constants in header
         // TODO: read all hparams from file
-            hparams.n_vocab = read_n_vocab();
-            hparams.n_ctx = read_u32("llama.context_length");
-            hparams.n_embd = read_u32("llama.embedding_length");
-            hparams.n_ff = read_u32("llama.feed_forward_length");
-            hparams.n_head = read_u32("llama.attention.head_count");
-            hparams.n_layer = read_u32("llama.layer_count");
-            hparams.n_rot = read_u32("llama.rope.dimension_count");
+        hparams.n_vocab = read_n_vocab();
+        hparams.n_ctx   = read_u32("llama.context_length");
+        hparams.n_embd  = read_u32("llama.embedding_length");
+        hparams.n_ff    = read_u32("llama.feed_forward_length");
+        hparams.n_head  = read_u32("llama.attention.head_count");
+        hparams.n_layer = read_u32("llama.layer_count");
+        hparams.n_rot   = read_u32("llama.rope.dimension_count");
 
         hparams.f_rms_norm_eps = read_f32("llama.attention.layer_norm_rms_epsilon");
 
         // LLaMAv2
@@ -606,7 +604,7 @@ struct ggml_context * ctx_data = NULL;
         }
     }
 
-    void read_tensor_metadata(gguf_load_tensors_map & tensors_map) {
+    void read_tensor_metadata(gguf_load_tensors_map & tensors_map) const {
         const int n_tensors = gguf_get_n_tensors(gguf_ctx);
 
         for (int i = 0; i < n_tensors; ++i) {
@@ -614,16 +612,19 @@ struct ggml_context * ctx_data = NULL;
             const char * name = gguf_get_tensor_name(gguf_ctx, i);
 
             struct ggml_tensor * cur = ggml_get_tensor(ctx_data, name);
-            uint32_t n_dims = cur->n_dims;
+
+            const uint32_t n_dims = cur->n_dims;
             tensor.type = cur->type;
             tensor.ne.resize(n_dims);
+
             for (uint32_t j = 0; j < n_dims; ++j) {
-            tensor.ne[j] = cur->ne[j];
+                tensor.ne[j] = cur->ne[j];
             }
 
             if (n_dims < 1 || n_dims > 2) {
                 throw std::runtime_error(format("llama.cpp: tensor '%s' should not be %u-dimensional", name, n_dims));
             }
+
             switch (tensor.type) {
                 case GGML_TYPE_F32:
                 case GGML_TYPE_F16:
@@ -643,7 +644,6 @@ struct ggml_context * ctx_data = NULL;
                 }
             }
 
-
             tensor.file_off = gguf_get_data_offset(gguf_ctx) + gguf_get_tensor_offset(gguf_ctx, i);
 
             tensor.name = name;
@@ -670,47 +670,47 @@ struct gguf_file_saver {
 
     gguf_file_saver(const char * fname, gguf_file_loader * fl, enum llama_ftype new_ftype)
         : file(fname, "wb"), fl(fl) {
-            fprintf(stderr, "llama.cpp: saving model to %s\n", fname);
-            write_header();
-            write_hparams(new_ftype);
-        }
+        fprintf(stderr, "llama.cpp: saving model to %s\n", fname);
+        write_header();
+        write_hparams(new_ftype);
+    }
 
     void write_header() {
         const int32_t magic = GGUF_MAGIC;
         file.write_i32(magic);
 
-            const int32_t version = GGUF_VERSION;
-            file.write_i32(version);
+        const int32_t version = GGUF_VERSION;
+        file.write_i32(version);
 
-            const int32_t n_tensors = gguf_get_n_tensors(fl->gguf_ctx);
-            file.write_i32(n_tensors);
+        const int32_t n_tensors = gguf_get_n_tensors(fl->gguf_ctx);
+        file.write_i32(n_tensors);
 
-            const int32_t n_kv = gguf_get_n_kv(fl->gguf_ctx);
-            file.write_i32(n_kv);
-        }
+        const int32_t n_kv = gguf_get_n_kv(fl->gguf_ctx);
+        file.write_i32(n_kv);
+    }
 
-        void write_hparam_arr_str(const std::string & key, enum gguf_type type, int i, int n_arr) {
-            std::vector<std::string> data(n_arr);
+    void write_hparam_arr_str(const std::string & key, enum gguf_type type, int i, int n_arr) {
+        std::vector<std::string> data(n_arr);
 
-            for (int j = 0; j < n_arr; ++j) {
-                std::string val = gguf_get_arr_str(fl->gguf_ctx, i, j);
-                data[j] = val;
-            }
-
-            file.write_arr(key, type, data);
+        for (int j = 0; j < n_arr; ++j) {
+            std::string val = gguf_get_arr_str(fl->gguf_ctx, i, j);
+            data[j] = val;
         }
 
-        void write_hparam_arr_f32(const std::string & key, enum gguf_type type, int i, int n_arr) {
-            std::vector<float> data(n_arr);
+        file.write_arr(key, type, data);
+    }
 
-            for (int j = 0; j < n_arr; ++j) {
-                float val = gguf_get_arr_f32(fl->gguf_ctx, i, j);
-                data[j] = val;
-            }
+    void write_hparam_arr_f32(const std::string & key, enum gguf_type type, int i, int n_arr) {
+        std::vector<float> data(n_arr);
 
-            file.write_arr(key, type, data);
+        for (int j = 0; j < n_arr; ++j) {
+            float val = gguf_get_arr_f32(fl->gguf_ctx, i, j);
+            data[j] = val;
         }
 
+        file.write_arr(key, type, data);
+    }
+
     void write_hparams(enum llama_ftype new_ftype) {
         const int32_t n_kv = gguf_get_n_kv(fl->gguf_ctx);
         for (int i = 0; i < n_kv; ++i) {
@@ -734,59 +734,62 @@ struct gguf_file_saver {
 
                switch(vtype) {
                    case GGUF_TYPE_BOOL:
-                            bool_val = gguf_get_val_bool(fl->gguf_ctx, i);
-                            file.write_val(key, GGUF_TYPE_BOOL, bool_val);
-                            break;
+                        bool_val = gguf_get_val_bool(fl->gguf_ctx, i);
+                        file.write_val(key, GGUF_TYPE_BOOL, bool_val);
+                        break;
                    case GGUF_TYPE_FLOAT32:
-                            f32_val = gguf_get_val_f32(fl->gguf_ctx, i);
-                            file.write_val(key, GGUF_TYPE_FLOAT32, f32_val);
-                            break;
+                        f32_val = gguf_get_val_f32(fl->gguf_ctx, i);
+                        file.write_val(key, GGUF_TYPE_FLOAT32, f32_val);
+                        break;
                    case GGUF_TYPE_INT16:
-                            i16_val = gguf_get_val_i16(fl->gguf_ctx, i);
-                            file.write_val(key, GGUF_TYPE_INT16, i16_val);
-                            break;
+                        i16_val = gguf_get_val_i16(fl->gguf_ctx, i);
+                        file.write_val(key, GGUF_TYPE_INT16, i16_val);
+                        break;
                    case GGUF_TYPE_INT32:
-                            i32_val = gguf_get_val_i32(fl->gguf_ctx, i);
-                            file.write_val(key, GGUF_TYPE_INT32, i32_val);
-                            break;
+                        i32_val = gguf_get_val_i32(fl->gguf_ctx, i);
+                        file.write_val(key, GGUF_TYPE_INT32, i32_val);
+                        break;
                    case GGUF_TYPE_INT8:
-                            i8_val = gguf_get_val_i8(fl->gguf_ctx, i);
-                            file.write_val(key, GGUF_TYPE_INT8, i8_val);
-                            break;
+                        i8_val = gguf_get_val_i8(fl->gguf_ctx, i);
+                        file.write_val(key, GGUF_TYPE_INT8, i8_val);
+                        break;
                    case GGUF_TYPE_STRING:
-                            str_val = gguf_get_val_str(fl->gguf_ctx, i);
-                            file.write_val(key, GGUF_TYPE_STRING, str_val);
-                            break;
+                        str_val = gguf_get_val_str(fl->gguf_ctx, i);
+                        file.write_val(key, GGUF_TYPE_STRING, str_val);
+                        break;
                    case GGUF_TYPE_UINT16:
-                            u16_val = gguf_get_val_u16(fl->gguf_ctx, i);
-                            file.write_val(key, GGUF_TYPE_UINT16, u16_val);
-                            break;
+                        u16_val = gguf_get_val_u16(fl->gguf_ctx, i);
+                        file.write_val(key, GGUF_TYPE_UINT16, u16_val);
+                        break;
                    case GGUF_TYPE_UINT32:
-                            u32_val = gguf_get_val_u32(fl->gguf_ctx, i);
-                            file.write_val(key, GGUF_TYPE_UINT32, u32_val);
-                            break;
+                        u32_val = gguf_get_val_u32(fl->gguf_ctx, i);
+                        file.write_val(key, GGUF_TYPE_UINT32, u32_val);
+                        break;
                    case GGUF_TYPE_UINT8:
-                            u8_val = gguf_get_val_u8(fl->gguf_ctx, i);
-                            file.write_val(key, GGUF_TYPE_UINT8, u8_val);
-                            break;
+                        u8_val = gguf_get_val_u8(fl->gguf_ctx, i);
+                        file.write_val(key, GGUF_TYPE_UINT8, u8_val);
+                        break;
                    case GGUF_TYPE_ARRAY:
-                            arr_type = gguf_get_arr_type(fl->gguf_ctx, i);
-                            n_arr = gguf_get_arr_n(fl->gguf_ctx, i);
-                            if (arr_type == GGUF_TYPE_FLOAT32) {
-                                write_hparam_arr_f32(key, arr_type, i, n_arr);
+                        arr_type = gguf_get_arr_type(fl->gguf_ctx, i);
+                        n_arr    = gguf_get_arr_n(fl->gguf_ctx, i);
+                        if (arr_type == GGUF_TYPE_FLOAT32) {
+                            write_hparam_arr_f32(key, arr_type, i, n_arr);
                        } else if (arr_type == GGUF_TYPE_STRING) {
                            write_hparam_arr_str(key, GGUF_TYPE_STRING, i, n_arr);
                        } else {
                            throw std::runtime_error("not implemented");
                        }
-                            break;
+                        break;
                    default:
-                            throw std::runtime_error(format("cannot recognize value type for key %s\n", key));
+                        throw std::runtime_error(format("cannot recognize value type for key %s\n", key));
                }
            }
        }
 
-            info_offset = file.tell();
+        info_offset = file.tell();
+
+        GGML_ASSERT(gguf_get_data_offset(fl->gguf_ctx) >= info_offset);
+
         size_t count = gguf_get_data_offset(fl->gguf_ctx) - info_offset;
         file.write_zeros(count);
         file.seek(info_offset, SEEK_SET);
diff --git a/gguf-llama.h b/gguf-llama.h
index 2f8d3a3effe32..a8ed69d918048 100644
--- a/gguf-llama.h
+++ b/gguf-llama.h
@@ -137,7 +137,7 @@ extern "C" {
     // model quantization parameters
     typedef struct llama_model_quantize_params {
         int nthread;                 // number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency()
-        enum llama_ftype ftype; // quantize to this llama_ftype
+        enum llama_ftype ftype;      // quantize to this llama_ftype
         bool allow_requantize;       // allow quantizing non-f32/f16 tensors
         bool quantize_output_tensor; // quantize output.weight
     } llama_model_quantize_params;