Mirror of https://github.com/mudler/LocalAI.git (synced 2026-01-04 01:30:08 -06:00)
fix(llama.cpp): fix eos without cache (#1852)
commit fa9e330fc6
parent b202bfaaa0
committed by GitHub
@@ -1084,7 +1084,7 @@ struct llama_server_context
             slot.has_next_token = false;
         }
 
-        if (!slot.cache_tokens.empty() && result.tok == llama_token_eos(model))
+        if (result.tok == llama_token_eos(model))
         {
             slot.stopped_eos = true;
             slot.has_next_token = false;
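The removed guard on slot.cache_tokens meant the end-of-sequence token was only honored when the prompt cache held tokens, so a slot running without a cache kept generating past EOS. Below is a minimal sketch of the stop check after the fix; slot_state, check_eos, and token_is_eos are illustrative stand-ins for the llama.cpp server structures shown in the diff, not the upstream API.

// Minimal sketch, not the upstream implementation.
#include <vector>

struct slot_state {
    std::vector<int> cache_tokens;  // may be empty when prompt caching is not used
    bool stopped_eos    = false;
    bool has_next_token = true;
};

// token_is_eos stands in for `result.tok == llama_token_eos(model)`.
void check_eos(slot_state & slot, bool token_is_eos) {
    // Old check: `!slot.cache_tokens.empty() && token_is_eos` never fired when the
    // cache was empty, so generation ran past the end-of-sequence token.
    // The fixed check looks at the token alone.
    if (token_is_eos) {
        slot.stopped_eos    = true;
        slot.has_next_token = false;
    }
}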