llama : temp fix for clearing "future" tokens from the KV cache
ggerganov committed Oct 2, 2023
1 parent 6a9fe3d commit 0f332a9
Showing 1 changed file with 19 additions and 0 deletions.
19 changes: 19 additions & 0 deletions llama.cpp
@@ -7478,6 +7478,25 @@ void llama_batch_free(struct llama_batch batch) {
 int llama_decode(
         struct llama_context * ctx,
           struct llama_batch   batch) {
+    // TODO: temporary solution to auto clear "future" tokens from the cache
+    // ref: https://github.com/ggerganov/llama.cpp/pull/3400
+    if (batch.pos) {
+        std::map<llama_seq_id, llama_pos> seq_min_pos;
+        for (int i = 0; i < batch.n_tokens; i++) {
+            if (seq_min_pos.count(batch.seq_id[i]) == 0) {
+                seq_min_pos[batch.seq_id[i]] = batch.pos[i];
+            } else {
+                seq_min_pos[batch.seq_id[i]] = std::min(seq_min_pos[batch.seq_id[i]], batch.pos[i]);
+            }
+        }
+
+        for (auto & kv : seq_min_pos) {
+            llama_kv_cache_seq_rm(ctx->kv_self, kv.first, kv.second, ctx->cparams.n_ctx);
+        }
+    } else {
+        llama_kv_cache_seq_rm(ctx->kv_self, batch.all_seq_id, batch.all_pos_0, ctx->cparams.n_ctx);
+    }
+
     const int ret = llama_decode_internal(*ctx, batch);
     if (ret < 0) {
         LLAMA_LOG_ERROR("%s: failed to decode, ret = %d\n", __func__, ret);
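The added block scans the incoming batch, records the smallest position submitted for each sequence, and then removes any cached entries at or beyond that position, so decoding a sequence again from an earlier position no longer leaves stale "future" tokens in the KV cache. Below is a minimal, self-contained sketch of that per-sequence minimum-position step; the Batch struct, its sample values, and the main() driver are illustrative stand-ins, not code from this commit or from llama_batch itself.

// Sketch: compute the smallest position per sequence in a batch, mirroring the
// added code, and report the range of cached positions that would be cleared.
// Batch is a simplified stand-in for llama_batch; llama_seq_id and llama_pos
// are assumed here to be plain int typedefs.
#include <algorithm>
#include <cstdio>
#include <map>
#include <vector>

using llama_seq_id = int;
using llama_pos    = int;

struct Batch {
    int                       n_tokens;
    std::vector<llama_pos>    pos;     // position of each token
    std::vector<llama_seq_id> seq_id;  // sequence each token belongs to
};

int main() {
    // Two sequences: seq 0 resumes at position 2, seq 1 resumes at position 5.
    Batch batch = { 4, { 2, 3, 5, 6 }, { 0, 0, 1, 1 } };

    // Smallest position submitted for each sequence.
    std::map<llama_seq_id, llama_pos> seq_min_pos;
    for (int i = 0; i < batch.n_tokens; i++) {
        if (seq_min_pos.count(batch.seq_id[i]) == 0) {
            seq_min_pos[batch.seq_id[i]] = batch.pos[i];
        } else {
            seq_min_pos[batch.seq_id[i]] = std::min(seq_min_pos[batch.seq_id[i]], batch.pos[i]);
        }
    }

    // Cached entries at or beyond that position are stale "future" tokens;
    // the commit removes them via llama_kv_cache_seq_rm up to n_ctx.
    for (const auto & kv : seq_min_pos) {
        printf("seq %d: clear cached positions [%d, n_ctx)\n", kv.first, kv.second);
    }
    return 0;
}

With this behavior inside llama_decode, a caller that rewinds a sequence and re-decodes from an earlier position can rely on the overlapping cache entries being dropped, instead of having to clear them manually with llama_kv_cache_seq_rm beforehand.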