mirror of
https://github.com/mudler/LocalAI.git
synced 2025-12-30 14:10:24 -06:00
* feat: split remaining backends and drop embedded backends - Drop silero-vad, huggingface, and stores backend from embedded binaries - Refactor Makefile and Dockerfile to avoid building grpc backends - Drop golang code that was used to embed backends - Simplify building by using goreleaser Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * chore(gallery): be specific with llama-cpp backend templates Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * chore(docs): update Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * chore(ci): minor fixes Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * chore: drop all ffmpeg references Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * fix: run protogen-go Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * Always enable p2p mode Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * Update gorelease file Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * fix(stores): do not always load Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * Fix linting issues Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * Simplify Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * Mac OS fixup Signed-off-by: Ettore Di Giacinto <mudler@localai.io> --------- Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
45 lines
907 B
YAML
45 lines
907 B
YAML
---
# LocalAI gallery template for the TuluV2 model family.
# `config_file` is a literal block scalar: its contents are an embedded model
# config that LocalAI parses as YAML when the model is installed.
name: "tuluv2"

config_file: |
  backend: "llama-cpp"
  mmap: true
  template:
    # chat_message and function render one message; the Go-template bodies
    # are intentionally identical for both prompt roles.
    chat_message: |
      <|{{ .RoleName }}|>
      {{ if .FunctionCall -}}
      Function call:
      {{ else if eq .RoleName "tool" -}}
      Function response:
      {{ end -}}
      {{ if .Content -}}
      {{.Content }}
      {{ end -}}
      {{ if .FunctionCall -}}
      {{toJson .FunctionCall}}
      {{ end -}}
    function: |
      <|{{ .RoleName }}|>
      {{ if .FunctionCall -}}
      Function call:
      {{ else if eq .RoleName "tool" -}}
      Function response:
      {{ end -}}
      {{ if .Content -}}
      {{.Content }}
      {{ end -}}
      {{ if .FunctionCall -}}
      {{toJson .FunctionCall}}
      {{ end -}}
    chat: |
      {{.Input -}}
      <|assistant|>
    completion: |
      {{.Input}}
  context_size: 4096
  f16: true
  stopwords:
    - '<|im_end|>'
    - '<dummy32000>'
    - '<|endoftext|>'