chore(deps): bump llama.cpp to '0e1ccf15c7b6d05c720551b537857ecf6194d420' (#7684)

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
This commit is contained in:
Ettore Di Giacinto
2025-12-22 09:50:42 +01:00
committed by GitHub
parent 8b3e0ebf8a
commit fc6057a952
2 changed files with 1 addition and 4 deletions

View File

@@ -1,5 +1,5 @@
-LLAMA_VERSION?=52ab19df633f3de5d4db171a16f2d9edd2342fec
+LLAMA_VERSION?=0e1ccf15c7b6d05c720551b537857ecf6194d420
LLAMA_REPO?=https://github.com/ggerganov/llama.cpp
CMAKE_ARGS?=

View File

@@ -78,9 +78,6 @@ static void start_llama_server(server_context& ctx_server) {
std::this_thread::sleep_for(std::chrono::milliseconds(100));
}
ctx_server.init();
//state.store(SERVER_STATE_READY);
LOG_INF("%s: model loaded\n", __func__);
// print sample chat example to make it clear which template is used