@@ -5903,6 +5903,13 @@ static int llama_decode_internal(
 
     ggml_allocr_alloc_graph(lctx.alloc, gf);
 
+    struct ggml_tensor * res = gf->nodes[gf->n_nodes - 1];
+    struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 2];
+
+    GGML_ASSERT(strcmp(res->name, "result_output") == 0);
+    GGML_ASSERT(strcmp(embeddings->name, "result_norm") == 0);
+
+
 #ifdef GGML_USE_CUBLAS
     for (int i = 0; i < gf->n_leafs; i++) {
         ggml_tensor * node = gf->leafs[i];
@@ -5920,6 +5927,12 @@ static int llama_decode_internal(
     }
 
     ggml_cuda_set_mul_mat_q(cparams.mul_mat_q);
+
+    // HACK: ggml-alloc may change the tensor backend when reusing a parent, so force output to be on the CPU here if needed
+    if (!lctx.embedding.empty()) {
+        embeddings->backend = GGML_BACKEND_CPU;
+    }
+    res->backend = GGML_BACKEND_CPU;
 #endif
 
     // LLAMA_LOG_INFO("graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs);
@@ -5944,12 +5957,6 @@ static int llama_decode_internal(
         n_threads = 1;
     }
 
-    struct ggml_tensor * res = gf->nodes[gf->n_nodes - 1];
-    struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 2];
-
-    GGML_ASSERT(strcmp(res->name, "result_output") == 0);
-    GGML_ASSERT(strcmp(embeddings->name, "result_norm") == 0);
-
 #if GGML_USE_MPI
     const int64_t n_layer = hparams.n_layer;
     ggml_mpi_graph_compute_pre(lctx.ctx_mpi, gf, n_layer);
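For context: after the graph has been evaluated, `llama_decode_internal` copies the logits and (when requested) the embeddings straight out of these two tensors with host-side `memcpy`, which only works if their data lives in CPU-addressable memory. A simplified sketch of that read-back follows; it is not part of this patch, and the `lctx`, `n_vocab` and `n_embd` variables mirror the surrounding function while the exact offsets are omitted:

    // Sketch only: read the output tensors back on the host after graph compute.
    // If ggml-alloc had left res/embeddings on the GPU backend, ggml_get_data()
    // would not point at readable host memory and these copies would be invalid.
    {
        lctx.logits.resize(n_vocab);
        memcpy(lctx.logits.data(), (float *) ggml_get_data(res), sizeof(float)*n_vocab);

        if (!lctx.embedding.empty()) {
            memcpy(lctx.embedding.data(), (float *) ggml_get_data(embeddings), sizeof(float)*n_embd);
        }
    }

That is why the second hunk pins `res->backend` (and `embeddings->backend` when embeddings are in use) to `GGML_BACKEND_CPU` before the graph runs, rather than trusting whatever backend ggml-alloc assigned when reusing a parent tensor.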