hparams : add n_embd_inp() to support extended embed #16928
base: master
@@ -276,8 +276,8 @@ static bool weight_buft_supported(const llama_hparams & hparams, ggml_tensor * w
             } break;
         case GGML_OP_IM2COL:
             {
-                const int n_embd = hparams.n_embd;
-                ggml_tensor * b = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, n_embd, w->ne[1], 1, 1);
+                const int n_embd_inp = hparams.n_embd_inp();
+                ggml_tensor * b = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, n_embd_inp, w->ne[1], 1, 1);

Review comment: I'm unsure if this is correct as well...

Review comment: I think either …

                 op_tensor = ggml_im2col(ctx, w, b, 1, 0, 0, 0, 1, 0, false, GGML_TYPE_F16);
             } break;
         case GGML_OP_SCALE:

@@ -1039,9 +1039,6 @@ void llama_model::load_hparams(llama_model_loader & ml) {
                     case 64: type = LLM_TYPE_32B; break;
                     default: type = LLM_TYPE_UNKNOWN;
                 }
-                // since vision model stacks deepstack features along feature dim
-                // we also create a fake "n_embd" for text model to be the main embd + deepstack embds
-                hparams.n_embd *= hparams.n_deepstack_layers + 1;
             } break;
         case LLM_ARCH_QWEN3MOE:
             {

@@ -1065,9 +1062,6 @@ void llama_model::load_hparams(llama_model_loader & ml) {
                     case 94: type = LLM_TYPE_235B_A22B; break;
                     default: type = LLM_TYPE_UNKNOWN;
                 }
-                // since vision model stacks deepstack features along feature dim
-                // we also create a fake "n_embd" for text model to be the main embd + deepstack embds
-                hparams.n_embd *= hparams.n_deepstack_layers + 1;
             } break;
         case LLM_ARCH_PHI2:
             {
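For context on what these two hunks remove: the text-model hparams no longer inflate `n_embd` to fake the deepstack-extended width; that relationship now lives behind the new accessor. A minimal sketch of what `llama_hparams::n_embd_inp()` presumably looks like, assuming it is derived from the existing `n_embd` and `n_deepstack_layers` fields (the return type and exact form are assumptions, not copied from this PR):

```cpp
// Sketch only: assumes n_embd_inp() simply replaces the removed multiplication.
// The vision model stacks deepstack features along the feature dimension, so
// the *input* embedding width is the main embedding width times
// (n_deepstack_layers + 1); for models without deepstack layers this reduces
// to plain n_embd.
uint32_t llama_hparams::n_embd_inp() const {
    return n_embd * (n_deepstack_layers + 1);
}
```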
@@ -3332,10 +3326,6 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
         case LLM_ARCH_QWEN3:
         case LLM_ARCH_QWEN3VL:
             {
-                // for model loading, the weights only have the main embd
-                // so we need to divide by the number of deepstack layers + 1
-                // n_embd is const int so we declare a new variable
-                int64_t n_embd = hparams.n_embd / (hparams.n_deepstack_layers + 1);
                 tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);

                 // output

@@ -3371,10 +3361,6 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
         case LLM_ARCH_QWEN3MOE:
         case LLM_ARCH_QWEN3VLMOE:
             {
-                // for model loading, the weights only have the main embd
-                // so we need to divide by the number of deepstack layers + 1
-                // n_embd is const int so we declare a new variable
-                int64_t n_embd = hparams.n_embd / (hparams.n_deepstack_layers + 1);
                 tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);

                 // output

@@ -6482,6 +6468,7 @@ void llama_model::print_info() const {
     if (!hparams.vocab_only) {
         LLAMA_LOG_INFO("%s: n_ctx_train = %u\n", __func__, hparams.n_ctx_train);
         LLAMA_LOG_INFO("%s: n_embd = %u\n", __func__, hparams.n_embd);
+        LLAMA_LOG_INFO("%s: n_embd_inp = %u\n", __func__, hparams.n_embd_inp());
         LLAMA_LOG_INFO("%s: n_layer = %u\n", __func__, hparams.n_layer);
         LLAMA_LOG_INFO("%s: n_head = %s\n", __func__, print_f([&](uint32_t il) { return hparams.n_head(il); }, hparams.n_layer).c_str());
         LLAMA_LOG_INFO("%s: n_head_kv = %s\n", __func__, print_f([&](uint32_t il) { return hparams.n_head_kv(il); }, hparams.n_layer).c_str());
@@ -6681,8 +6668,9 @@ ggml_backend_buffer_type_t llama_model::select_buft(int il) const {
     return ::select_buft(
         *pimpl->dev_layer.at(il).buft_list,
         [&](ggml_context * ctx) {
-            ggml_tensor * cur = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hparams.n_embd);
-            ggml_tensor * layer_dir = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hparams.n_embd);
+            const int n_embd_inp = hparams.n_embd_inp();
+            ggml_tensor * cur = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd_inp);
+            ggml_tensor * layer_dir = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd_inp);

Comment on lines +6671 to +6673:

Review comment: I'm not 100% sure if … As far as I can understand, the …

Review comment (Contributor): So I just know what I learned from trying to make cvectors for Qwen3VL, so with that in mind: this at least is called from `llama-adapter.cpp` for cvectors, but the check in cvector is hardcoded against `n_embd`, so it will still pass regardless (I was wondering why my cvector worked with this code change). I assume that since the cvector is for the language model, it doesn't matter so much if the other ones are empty, as long as they are zeroed. It seems like the answer might be to use `n_embd` here, unless there is somewhere multimodal where this is used, or multimodal cvectors are a thing, or this is called from other contexts that need the "rest" of it?

             return ggml_add(ctx, cur, layer_dir);
         });
 }

@@ -7329,6 +7317,10 @@ int32_t llama_model_n_embd(const llama_model * model) {
     return model->hparams.n_embd;
 }

+int32_t llama_model_n_embd_inp(const llama_model * model) {
+    return model->hparams.n_embd_inp();
+}
+
 int32_t llama_model_n_layer(const llama_model * model) {
     return model->hparams.n_layer;
 }

Review comment: Technically this should be `n_embd_out` (aka currently `n_embd`), right?

Review comment: Could be, I wasn't entirely sure.
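For completeness, a short usage sketch of the new public accessor from the hunk above, next to the existing `llama_model_n_embd()`. The model path is a placeholder and the surrounding calls are the standard llama.cpp C API; only `llama_model_n_embd_inp()` is new in this PR:

```cpp
#include <cstdio>
#include "llama.h"

int main() {
    llama_backend_init();

    llama_model_params mparams = llama_model_default_params();
    // placeholder path; point this at a real GGUF model
    llama_model * model = llama_model_load_from_file("model.gguf", mparams);
    if (model == NULL) {
        return 1;
    }

    // n_embd:     width of the token embedding / hidden state
    // n_embd_inp: width of the embeddings fed into the graph input;
    //             for deepstack models (e.g. Qwen3-VL) this is larger than
    //             n_embd, otherwise the two values match
    printf("n_embd     = %d\n", llama_model_n_embd(model));
    printf("n_embd_inp = %d\n", llama_model_n_embd_inp(model));

    llama_model_free(model);
    llama_backend_free();
    return 0;
}
```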