We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent ae86b5e · commit bd76198 (copy full SHA for bd76198)
src/llama.cpp
@@ -13823,9 +13823,6 @@ struct llm_build_context {
13823
struct ggml_cgraph * build_nemotron() {
13824
struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
13825
13826
- // mutable variable, needed during the last layer of the computation to skip unused tokens
13827
- int32_t n_tokens = this->n_tokens;
13828
-
13829
const int64_t n_embd_head = hparams.n_embd_head_v;
13830
GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
13831
//GGML_ASSERT(n_embd_head == hparams.n_rot);
0 commit comments