@@ -1991,11 +1991,11 @@ struct llama_model_loader {
         return tensor;
     }

-    struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, ggml_backend_type backend, bool optional = false) {
+    struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, ggml_backend_type backend, bool required = true) {
         struct ggml_tensor * cur = ggml_get_tensor(ctx_meta, name.c_str());

         if (cur == NULL) {
-            if (optional) {
+            if (!required) {
                 return NULL;
             }
             throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str()));
@@ -2816,10 +2816,10 @@ static void llm_load_tensors(
                     layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);

                     // optional bias tensors
-                    layer.bq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd},     backend, true);
-                    layer.bk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, backend, true);
-                    layer.bv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, backend, true);
-                    layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd},     backend, true);
+                    layer.bq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd},     backend, false);
+                    layer.bk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, backend, false);
+                    layer.bv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, backend, false);
+                    layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd},     backend, false);

                     layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
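For context, the diff flips the loader's flag from "optional = false" to "required = true", so a tensor that must exist stays the default and the optional bias call sites above now read as create_tensor(..., false). Below is a minimal standalone sketch of that calling convention, not llama.cpp code: fake_tensor, the in-memory g_tensors map, and the simplified create_tensor signature are illustrative assumptions that stand in for the real loader.

// sketch.cpp: illustrates the "required" convention, assuming a simplified loader
#include <cstdio>
#include <map>
#include <stdexcept>
#include <string>

struct fake_tensor { std::string name; };

// hypothetical stand-in for the tensors found in a model file;
// note: no "blk.0.attn_q.bias" entry, so the optional bias is absent
static std::map<std::string, fake_tensor> g_tensors = {
    {"blk.0.attn_q.weight", {"blk.0.attn_q.weight"}},
};

// hypothetical stand-in for llama_model_loader::create_tensor:
// throws for missing required tensors, returns nullptr for missing optional ones
static fake_tensor * create_tensor(const std::string & name, bool required = true) {
    auto it = g_tensors.find(name);
    if (it == g_tensors.end()) {
        if (!required) {
            return nullptr; // optional tensor: absence is fine
        }
        throw std::runtime_error("tensor '" + name + "' not found");
    }
    return &it->second;
}

int main() {
    fake_tensor * wq = create_tensor("blk.0.attn_q.weight");      // required (default)
    fake_tensor * bq = create_tensor("blk.0.attn_q.bias", false); // optional

    std::printf("wq: %s\n", wq->name.c_str());
    std::printf("bq: %s\n", bq ? bq->name.c_str() : "(absent, bias add skipped)");
    return 0;
}

Callers then branch on the returned pointer (here, bq being null) rather than catching an exception, which is the behavior the optional attention-bias loads in the second hunk rely on.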