
Commit 5a7d312

llama : avoid using "optional" keyword (#4283)
1 parent d5a1cbd commit 5a7d312

File tree

1 file changed (+6, -6 lines)


llama.cpp (+6, -6)
@@ -1991,11 +1991,11 @@ struct llama_model_loader {
         return tensor;
     }

-    struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, ggml_backend_type backend, bool optional = false) {
+    struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, ggml_backend_type backend, bool required = true) {
         struct ggml_tensor * cur = ggml_get_tensor(ctx_meta, name.c_str());

         if (cur == NULL) {
-            if (optional) {
+            if (!required) {
                 return NULL;
             }
             throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str()));
@@ -2816,10 +2816,10 @@ static void llm_load_tensors(
             layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);

             // optional bias tensors
-            layer.bq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd},     backend, true);
-            layer.bk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, backend, true);
-            layer.bv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, backend, true);
-            layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd},     backend, true);
+            layer.bq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd},     backend, false);
+            layer.bk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, backend, false);
+            layer.bv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, backend, false);
+            layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd},     backend, false);

             layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
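The change keeps behaviour identical while inverting the meaning of the trailing flag: call sites that previously passed optional = true now pass required = false, and the default still means "a missing tensor is an error". A minimal, self-contained C++ sketch of the same pattern; the lookup table and names below are illustrative, not taken from llama.cpp:

#include <map>
#include <stdexcept>
#include <string>

// Toy stand-in for the tensor lookup: after this commit the trailing flag
// is `required` (default true) instead of `optional` (default false).
static const std::map<std::string, int> table = { { "attn_q.weight", 1 } };

static const int * find_entry(const std::string & name, bool required = true) {
    auto it = table.find(name);
    if (it == table.end()) {
        if (!required) {
            return nullptr;  // optional entry: absence is not an error
        }
        throw std::runtime_error("entry '" + name + "' not found");
    }
    return &it->second;
}

int main() {
    const int * w = find_entry("attn_q.weight");       // required (default)
    const int * b = find_entry("attn_q.bias", false);  // optional, may be null
    return (w != nullptr && b == nullptr) ? 0 : 1;
}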
0 commit comments