fix: drop racy code, refactor and group API schema (#931)

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
Author: Ettore Di Giacinto
Date: 2023-08-20 14:04:45 +02:00
Committed by: GitHub
Parent: 28db83e17b
Commit: cc060a283d

55 changed files with 239 additions and 317 deletions
@@ -9,7 +9,7 @@ import (
)
type StableDiffusion struct {
-base.Base
+base.SingleThread
stablediffusion *stablediffusion.StableDiffusion
}
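The same one-line swap repeats for every backend below: the embedded base.Base becomes base.SingleThread, and the hand-rolled Lock()/Unlock() calls and state-check warnings disappear from each backend body. A minimal sketch of the resulting pattern, using a hypothetical myBackend type (not part of this commit) to stand in for the concrete backends:

package mybackend

import (
	"github.com/go-skynet/LocalAI/pkg/grpc/base"
	pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
)

// myBackend embeds base.SingleThread, which reports Locking() == true,
// so the gRPC server serializes requests for it. The backend itself no
// longer touches any mutex.
type myBackend struct {
	base.SingleThread
	modelFile string
}

func (llm *myBackend) Load(opts *pb.ModelOptions) error {
	// No llm.Base.Lock()/Unlock() here anymore: the server owns the lock.
	llm.modelFile = opts.ModelFile
	return nil
}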
@@ -4,32 +4,23 @@ package bert
// It is meant to be used by the main executable that is the server for the specific backend type (falcon, gpt3, etc)
import (
bert "github.com/go-skynet/go-bert.cpp"
"github.com/rs/zerolog/log"
"github.com/go-skynet/LocalAI/pkg/grpc/base"
pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
)
type Embeddings struct {
-base.Base
+base.SingleThread
bert *bert.Bert
}
func (llm *Embeddings) Load(opts *pb.ModelOptions) error {
-if llm.Base.State != pb.StatusResponse_UNINITIALIZED {
-log.Warn().Msgf("bert backend loading %s while already in state %s!", opts.Model, llm.Base.State.String())
-}
-llm.Base.Lock()
-defer llm.Base.Unlock()
model, err := bert.New(opts.ModelFile)
llm.bert = model
return err
}
func (llm *Embeddings) Embeddings(opts *pb.PredictOptions) ([]float32, error) {
-llm.Base.Lock()
-defer llm.Base.Unlock()
if len(opts.EmbeddingTokens) > 0 {
tokens := []int{}
@@ -7,24 +7,17 @@ import (
"github.com/go-skynet/LocalAI/pkg/grpc/base"
pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
"github.com/rs/zerolog/log"
"github.com/go-skynet/bloomz.cpp"
)
type LLM struct {
-base.Base
+base.SingleThread
bloomz *bloomz.Bloomz
}
func (llm *LLM) Load(opts *pb.ModelOptions) error {
-if llm.Base.State != pb.StatusResponse_UNINITIALIZED {
-log.Warn().Msgf("bloomz backend loading %s while already in state %s!", opts.Model, llm.Base.State.String())
-}
-llm.Base.Lock()
-defer llm.Base.Unlock()
model, err := bloomz.New(opts.ModelFile)
llm.bloomz = model
return err
@@ -47,16 +40,11 @@ func buildPredictOptions(opts *pb.PredictOptions) []bloomz.PredictOption {
}
func (llm *LLM) Predict(opts *pb.PredictOptions) (string, error) {
-llm.Base.Lock()
-defer llm.Base.Unlock()
return llm.bloomz.Predict(opts.Prompt, buildPredictOptions(opts)...)
}
// fallback to Predict
func (llm *LLM) PredictStream(opts *pb.PredictOptions, results chan string) error {
-llm.Base.Lock()
go func() {
res, err := llm.bloomz.Predict(opts.Prompt, buildPredictOptions(opts)...)
@@ -65,7 +53,6 @@ func (llm *LLM) PredictStream(opts *pb.PredictOptions, results chan string) erro
}
results <- res
close(results)
-llm.Base.Unlock()
}()
return nil
@@ -7,25 +7,17 @@ import (
"github.com/go-skynet/LocalAI/pkg/grpc/base"
pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
"github.com/rs/zerolog/log"
ggllm "github.com/mudler/go-ggllm.cpp"
)
type LLM struct {
-base.Base
+base.SingleThread
falcon *ggllm.Falcon
}
func (llm *LLM) Load(opts *pb.ModelOptions) error {
-if llm.Base.State != pb.StatusResponse_UNINITIALIZED {
-log.Warn().Msgf("falcon backend loading %s while already in state %s!", opts.Model, llm.Base.State.String())
-}
-llm.Base.Lock()
-defer llm.Base.Unlock()
ggllmOpts := []ggllm.ModelOption{}
if opts.ContextSize != 0 {
ggllmOpts = append(ggllmOpts, ggllm.SetContext(int(opts.ContextSize)))
@@ -126,13 +118,10 @@ func buildPredictOptions(opts *pb.PredictOptions) []ggllm.PredictOption {
}
func (llm *LLM) Predict(opts *pb.PredictOptions) (string, error) {
-llm.Base.Lock()
-defer llm.Base.Unlock()
return llm.falcon.Predict(opts.Prompt, buildPredictOptions(opts)...)
}
func (llm *LLM) PredictStream(opts *pb.PredictOptions, results chan string) error {
-llm.Base.Lock()
predictOptions := buildPredictOptions(opts)
@@ -150,7 +139,6 @@ func (llm *LLM) PredictStream(opts *pb.PredictOptions, results chan string) erro
fmt.Println("err: ", err)
}
close(results)
-llm.Base.Unlock()
}()
return nil
@@ -8,23 +8,15 @@ import (
"github.com/go-skynet/LocalAI/pkg/grpc/base"
pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
gpt4all "github.com/nomic-ai/gpt4all/gpt4all-bindings/golang"
"github.com/rs/zerolog/log"
)
type LLM struct {
-base.Base
+base.SingleThread
gpt4all *gpt4all.Model
}
func (llm *LLM) Load(opts *pb.ModelOptions) error {
-if llm.Base.State != pb.StatusResponse_UNINITIALIZED {
-log.Warn().Msgf("gpt4all backend loading %s while already in state %s!", opts.Model, llm.Base.State.String())
-}
-llm.Base.Lock()
-defer llm.Base.Unlock()
model, err := gpt4all.New(opts.ModelFile,
gpt4all.SetThreads(int(opts.Threads)),
gpt4all.SetLibrarySearchPath(opts.LibrarySearchPath))
@@ -47,15 +39,10 @@ func buildPredictOptions(opts *pb.PredictOptions) []gpt4all.PredictOption {
}
func (llm *LLM) Predict(opts *pb.PredictOptions) (string, error) {
-llm.Base.Lock()
-defer llm.Base.Unlock()
return llm.gpt4all.Predict(opts.Prompt, buildPredictOptions(opts)...)
}
func (llm *LLM) PredictStream(opts *pb.PredictOptions, results chan string) error {
-llm.Base.Lock()
predictOptions := buildPredictOptions(opts)
go func() {
@@ -69,7 +56,6 @@ func (llm *LLM) PredictStream(opts *pb.PredictOptions, results chan string) erro
}
llm.gpt4all.SetTokenCallback(nil)
close(results)
-llm.Base.Unlock()
}()
return nil
@@ -8,7 +8,6 @@ import (
"github.com/go-skynet/LocalAI/pkg/grpc/base"
pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
"github.com/go-skynet/LocalAI/pkg/langchain"
"github.com/rs/zerolog/log"
)
type LLM struct {
@@ -19,21 +18,12 @@ type LLM struct {
}
func (llm *LLM) Load(opts *pb.ModelOptions) error {
-if llm.Base.State != pb.StatusResponse_UNINITIALIZED {
-log.Warn().Msgf("langchain backend loading %s while already in state %s!", opts.Model, llm.Base.State.String())
-}
-llm.Base.Lock()
-defer llm.Base.Unlock()
llm.langchain, _ = langchain.NewHuggingFace(opts.Model)
llm.model = opts.Model
return nil
}
func (llm *LLM) Predict(opts *pb.PredictOptions) (string, error) {
-llm.Base.Lock()
-defer llm.Base.Unlock()
o := []langchain.PredictOption{
langchain.SetModel(llm.model),
langchain.SetMaxTokens(int(opts.Tokens)),
@@ -48,7 +38,6 @@ func (llm *LLM) Predict(opts *pb.PredictOptions) (string, error) {
}
func (llm *LLM) PredictStream(opts *pb.PredictOptions, results chan string) error {
-llm.Base.Lock()
o := []langchain.PredictOption{
langchain.SetModel(llm.model),
langchain.SetMaxTokens(int(opts.Tokens)),
@@ -63,7 +52,6 @@ func (llm *LLM) PredictStream(opts *pb.PredictOptions, results chan string) erro
}
results <- res.Completion
close(results)
-llm.Base.Unlock()
}()
return nil
@@ -8,24 +8,15 @@ import (
"github.com/go-skynet/LocalAI/pkg/grpc/base"
pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
"github.com/go-skynet/go-llama.cpp"
"github.com/rs/zerolog/log"
)
type LLM struct {
-base.Base
+base.SingleThread
llama *llama.LLama
}
func (llm *LLM) Load(opts *pb.ModelOptions) error {
-if llm.Base.State != pb.StatusResponse_UNINITIALIZED {
-log.Warn().Msgf("llama backend loading %s while already in state %s!", opts.Model, llm.Base.State.String())
-}
-llm.Base.Lock()
-defer llm.Base.Unlock()
ropeFreqBase := float32(10000)
ropeFreqScale := float32(1)
@@ -176,14 +167,10 @@ func buildPredictOptions(opts *pb.PredictOptions) []llama.PredictOption {
}
func (llm *LLM) Predict(opts *pb.PredictOptions) (string, error) {
-llm.Base.Lock()
-defer llm.Base.Unlock()
return llm.llama.Predict(opts.Prompt, buildPredictOptions(opts)...)
}
func (llm *LLM) PredictStream(opts *pb.PredictOptions, results chan string) error {
-llm.Base.Lock()
predictOptions := buildPredictOptions(opts)
predictOptions = append(predictOptions, llama.SetTokenCallback(func(token string) bool {
@@ -197,16 +184,12 @@ func (llm *LLM) PredictStream(opts *pb.PredictOptions, results chan string) erro
fmt.Println("err: ", err)
}
close(results)
-llm.Base.Unlock()
}()
return nil
}
func (llm *LLM) Embeddings(opts *pb.PredictOptions) ([]float32, error) {
-llm.Base.Lock()
-defer llm.Base.Unlock()
predictOptions := buildPredictOptions(opts)
if len(opts.EmbeddingTokens) > 0 {
@@ -221,9 +204,6 @@ func (llm *LLM) Embeddings(opts *pb.PredictOptions) ([]float32, error) {
}
func (llm *LLM) TokenizeString(opts *pb.PredictOptions) (pb.TokenizationResponse, error) {
-llm.Base.Lock()
-defer llm.Base.Unlock()
predictOptions := buildPredictOptions(opts)
l, tokens, err := llm.llama.TokenizeString(opts.Prompt, predictOptions...)
if err != nil {
@@ -9,24 +9,17 @@ import (
"github.com/donomii/go-rwkv.cpp"
"github.com/go-skynet/LocalAI/pkg/grpc/base"
pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
"github.com/rs/zerolog/log"
)
const tokenizerSuffix = ".tokenizer.json"
type LLM struct {
-base.Base
+base.SingleThread
rwkv *rwkv.RwkvState
}
func (llm *LLM) Load(opts *pb.ModelOptions) error {
-if llm.Base.State != pb.StatusResponse_UNINITIALIZED {
-log.Warn().Msgf("rwkv backend loading %s while already in state %s!", opts.Model, llm.Base.State.String())
-}
-llm.Base.Lock()
-defer llm.Base.Unlock()
modelPath := filepath.Dir(opts.ModelFile)
modelFile := filepath.Base(opts.ModelFile)
model := rwkv.LoadFiles(opts.ModelFile, filepath.Join(modelPath, modelFile+tokenizerSuffix), uint32(opts.GetThreads()))
@@ -39,9 +32,6 @@ func (llm *LLM) Load(opts *pb.ModelOptions) error {
}
func (llm *LLM) Predict(opts *pb.PredictOptions) (string, error) {
-llm.Base.Lock()
-defer llm.Base.Unlock()
stopWord := "\n"
if len(opts.StopPrompts) > 0 {
stopWord = opts.StopPrompts[0]
@@ -57,7 +47,6 @@ func (llm *LLM) Predict(opts *pb.PredictOptions) (string, error) {
}
func (llm *LLM) PredictStream(opts *pb.PredictOptions, results chan string) error {
-llm.Base.Lock()
go func() {
stopWord := "\n"
@@ -75,7 +64,6 @@ func (llm *LLM) PredictStream(opts *pb.PredictOptions, results chan string) erro
return true
})
close(results)
-llm.Base.Unlock()
}()
return nil
@@ -7,38 +7,28 @@ import (
"github.com/go-skynet/LocalAI/pkg/grpc/base"
pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
"github.com/rs/zerolog/log"
transformers "github.com/go-skynet/go-ggml-transformers.cpp"
)
type Dolly struct {
-base.Base
+base.SingleThread
dolly *transformers.Dolly
}
func (llm *Dolly) Load(opts *pb.ModelOptions) error {
-if llm.Base.State != pb.StatusResponse_UNINITIALIZED {
-log.Warn().Msgf("dolly backend loading %s while already in state %s!", opts.Model, llm.Base.State.String())
-}
-llm.Base.Lock()
-defer llm.Base.Unlock()
model, err := transformers.NewDolly(opts.ModelFile)
llm.dolly = model
return err
}
func (llm *Dolly) Predict(opts *pb.PredictOptions) (string, error) {
-llm.Base.Lock()
-defer llm.Base.Unlock()
return llm.dolly.Predict(opts.Prompt, buildPredictOptions(opts)...)
}
// fallback to Predict
func (llm *Dolly) PredictStream(opts *pb.PredictOptions, results chan string) error {
-llm.Base.Lock()
go func() {
res, err := llm.dolly.Predict(opts.Prompt, buildPredictOptions(opts)...)
@@ -48,7 +38,6 @@ func (llm *Dolly) PredictStream(opts *pb.PredictOptions, results chan string) er
}
results <- res
close(results)
-llm.Base.Unlock()
}()
return nil
@@ -7,38 +7,28 @@ import (
"github.com/go-skynet/LocalAI/pkg/grpc/base"
pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
"github.com/rs/zerolog/log"
transformers "github.com/go-skynet/go-ggml-transformers.cpp"
)
type Falcon struct {
-base.Base
+base.SingleThread
falcon *transformers.Falcon
}
func (llm *Falcon) Load(opts *pb.ModelOptions) error {
-if llm.Base.State != pb.StatusResponse_UNINITIALIZED {
-log.Warn().Msgf("transformers-falcon backend loading %s while already in state %s!", opts.Model, llm.Base.State.String())
-}
-llm.Base.Lock()
-defer llm.Base.Unlock()
model, err := transformers.NewFalcon(opts.ModelFile)
llm.falcon = model
return err
}
func (llm *Falcon) Predict(opts *pb.PredictOptions) (string, error) {
-llm.Base.Lock()
-defer llm.Base.Unlock()
return llm.falcon.Predict(opts.Prompt, buildPredictOptions(opts)...)
}
// fallback to Predict
func (llm *Falcon) PredictStream(opts *pb.PredictOptions, results chan string) error {
-llm.Base.Lock()
go func() {
res, err := llm.falcon.Predict(opts.Prompt, buildPredictOptions(opts)...)
@@ -47,7 +37,6 @@ func (llm *Falcon) PredictStream(opts *pb.PredictOptions, results chan string) e
}
results <- res
close(results)
-llm.Base.Unlock()
}()
return nil
@@ -7,38 +7,28 @@ import (
"github.com/go-skynet/LocalAI/pkg/grpc/base"
pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
"github.com/rs/zerolog/log"
transformers "github.com/go-skynet/go-ggml-transformers.cpp"
)
type GPT2 struct {
-base.Base
+base.SingleThread
gpt2 *transformers.GPT2
}
func (llm *GPT2) Load(opts *pb.ModelOptions) error {
-if llm.Base.State != pb.StatusResponse_UNINITIALIZED {
-log.Warn().Msgf("gpt2 backend loading %s while already in state %s!", opts.Model, llm.Base.State.String())
-}
-llm.Base.Lock()
-defer llm.Base.Unlock()
model, err := transformers.New(opts.ModelFile)
llm.gpt2 = model
return err
}
func (llm *GPT2) Predict(opts *pb.PredictOptions) (string, error) {
-llm.Base.Lock()
-defer llm.Base.Unlock()
return llm.gpt2.Predict(opts.Prompt, buildPredictOptions(opts)...)
}
// fallback to Predict
func (llm *GPT2) PredictStream(opts *pb.PredictOptions, results chan string) error {
-llm.Base.Lock()
go func() {
res, err := llm.gpt2.Predict(opts.Prompt, buildPredictOptions(opts)...)
@@ -47,7 +37,6 @@ func (llm *GPT2) PredictStream(opts *pb.PredictOptions, results chan string) err
}
results <- res
close(results)
-llm.Base.Unlock()
}()
return nil
}
@@ -7,38 +7,28 @@ import (
"github.com/go-skynet/LocalAI/pkg/grpc/base"
pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
"github.com/rs/zerolog/log"
transformers "github.com/go-skynet/go-ggml-transformers.cpp"
)
type GPTJ struct {
-base.Base
+base.SingleThread
gptj *transformers.GPTJ
}
func (llm *GPTJ) Load(opts *pb.ModelOptions) error {
-if llm.Base.State != pb.StatusResponse_UNINITIALIZED {
-log.Warn().Msgf("gptj backend loading %s while already in state %s!", opts.Model, llm.Base.State.String())
-}
-llm.Base.Lock()
-defer llm.Base.Unlock()
model, err := transformers.NewGPTJ(opts.ModelFile)
llm.gptj = model
return err
}
func (llm *GPTJ) Predict(opts *pb.PredictOptions) (string, error) {
-llm.Base.Lock()
-defer llm.Base.Unlock()
return llm.gptj.Predict(opts.Prompt, buildPredictOptions(opts)...)
}
// fallback to Predict
func (llm *GPTJ) PredictStream(opts *pb.PredictOptions, results chan string) error {
-llm.Base.Lock()
go func() {
res, err := llm.gptj.Predict(opts.Prompt, buildPredictOptions(opts)...)
@@ -47,7 +37,6 @@ func (llm *GPTJ) PredictStream(opts *pb.PredictOptions, results chan string) err
}
results <- res
close(results)
-llm.Base.Unlock()
}()
return nil
}
@@ -7,38 +7,28 @@ import (
"github.com/go-skynet/LocalAI/pkg/grpc/base"
pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
"github.com/rs/zerolog/log"
transformers "github.com/go-skynet/go-ggml-transformers.cpp"
)
type GPTNeoX struct {
-base.Base
+base.SingleThread
gptneox *transformers.GPTNeoX
}
func (llm *GPTNeoX) Load(opts *pb.ModelOptions) error {
-if llm.Base.State != pb.StatusResponse_UNINITIALIZED {
-log.Warn().Msgf("gptneox backend loading %s while already in state %s!", opts.Model, llm.Base.State.String())
-}
-llm.Base.Lock()
-defer llm.Base.Unlock()
model, err := transformers.NewGPTNeoX(opts.ModelFile)
llm.gptneox = model
return err
}
func (llm *GPTNeoX) Predict(opts *pb.PredictOptions) (string, error) {
-llm.Base.Lock()
-defer llm.Base.Unlock()
return llm.gptneox.Predict(opts.Prompt, buildPredictOptions(opts)...)
}
// fallback to Predict
func (llm *GPTNeoX) PredictStream(opts *pb.PredictOptions, results chan string) error {
-llm.Base.Lock()
go func() {
res, err := llm.gptneox.Predict(opts.Prompt, buildPredictOptions(opts)...)
@@ -47,7 +37,6 @@ func (llm *GPTNeoX) PredictStream(opts *pb.PredictOptions, results chan string)
}
results <- res
close(results)
-llm.Base.Unlock()
}()
return nil
}
@@ -7,39 +7,28 @@ import (
"github.com/go-skynet/LocalAI/pkg/grpc/base"
pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
"github.com/rs/zerolog/log"
transformers "github.com/go-skynet/go-ggml-transformers.cpp"
)
type MPT struct {
-base.Base
+base.SingleThread
mpt *transformers.MPT
}
func (llm *MPT) Load(opts *pb.ModelOptions) error {
-if llm.Base.State != pb.StatusResponse_UNINITIALIZED {
-log.Warn().Msgf("mpt backend loading %s while already in state %s!", opts.Model, llm.Base.State.String())
-}
-llm.Base.Lock()
-defer llm.Base.Unlock()
model, err := transformers.NewMPT(opts.ModelFile)
llm.mpt = model
return err
}
func (llm *MPT) Predict(opts *pb.PredictOptions) (string, error) {
-llm.Base.Lock()
-defer llm.Base.Unlock()
return llm.mpt.Predict(opts.Prompt, buildPredictOptions(opts)...)
}
// fallback to Predict
func (llm *MPT) PredictStream(opts *pb.PredictOptions, results chan string) error {
-llm.Base.Lock()
go func() {
res, err := llm.mpt.Predict(opts.Prompt, buildPredictOptions(opts)...)
@@ -48,7 +37,6 @@ func (llm *MPT) PredictStream(opts *pb.PredictOptions, results chan string) erro
}
results <- res
close(results)
-llm.Base.Unlock()
}()
return nil
}
@@ -7,38 +7,28 @@ import (
"github.com/go-skynet/LocalAI/pkg/grpc/base"
pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
"github.com/rs/zerolog/log"
transformers "github.com/go-skynet/go-ggml-transformers.cpp"
)
type Replit struct {
-base.Base
+base.SingleThread
replit *transformers.Replit
}
func (llm *Replit) Load(opts *pb.ModelOptions) error {
-if llm.Base.State != pb.StatusResponse_UNINITIALIZED {
-log.Warn().Msgf("replit backend loading %s while already in state %s!", opts.Model, llm.Base.State.String())
-}
-llm.Base.Lock()
-defer llm.Base.Unlock()
model, err := transformers.NewReplit(opts.ModelFile)
llm.replit = model
return err
}
func (llm *Replit) Predict(opts *pb.PredictOptions) (string, error) {
-llm.Base.Lock()
-defer llm.Base.Unlock()
return llm.replit.Predict(opts.Prompt, buildPredictOptions(opts)...)
}
// fallback to Predict
func (llm *Replit) PredictStream(opts *pb.PredictOptions, results chan string) error {
-llm.Base.Lock()
go func() {
res, err := llm.replit.Predict(opts.Prompt, buildPredictOptions(opts)...)
@@ -47,7 +37,6 @@ func (llm *Replit) PredictStream(opts *pb.PredictOptions, results chan string) e
}
results <- res
close(results)
-llm.Base.Unlock()
}()
return nil
}
@@ -7,38 +7,28 @@ import (
"github.com/go-skynet/LocalAI/pkg/grpc/base"
pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
"github.com/rs/zerolog/log"
transformers "github.com/go-skynet/go-ggml-transformers.cpp"
)
type Starcoder struct {
-base.Base
+base.SingleThread
starcoder *transformers.Starcoder
}
func (llm *Starcoder) Load(opts *pb.ModelOptions) error {
-if llm.Base.State != pb.StatusResponse_UNINITIALIZED {
-log.Warn().Msgf("starcoder backend loading %s while already in state %s!", opts.Model, llm.Base.State.String())
-}
-llm.Base.Lock()
-defer llm.Base.Unlock()
model, err := transformers.NewStarcoder(opts.ModelFile)
llm.starcoder = model
return err
}
func (llm *Starcoder) Predict(opts *pb.PredictOptions) (string, error) {
-llm.Base.Lock()
-defer llm.Base.Unlock()
return llm.starcoder.Predict(opts.Prompt, buildPredictOptions(opts)...)
}
// fallback to Predict
func (llm *Starcoder) PredictStream(opts *pb.PredictOptions, results chan string) error {
-llm.Base.Lock()
go func() {
res, err := llm.starcoder.Predict(opts.Prompt, buildPredictOptions(opts)...)
@@ -47,7 +37,6 @@ func (llm *Starcoder) PredictStream(opts *pb.PredictOptions, results chan string
}
results <- res
close(results)
-llm.Base.Unlock()
}()
return nil
@@ -1,4 +1,4 @@
-package whisper
+package transcribe
import (
"fmt"
@@ -7,8 +7,8 @@ import (
"path/filepath"
"github.com/ggerganov/whisper.cpp/bindings/go/pkg/whisper"
wav "github.com/go-audio/wav"
"github.com/go-skynet/LocalAI/pkg/grpc/whisper/api"
"github.com/go-audio/wav"
"github.com/go-skynet/LocalAI/api/schema"
)
func sh(c string) (string, error) {
@@ -29,8 +29,8 @@ func audioToWav(src, dst string) error {
return nil
}
-func Transcript(model whisper.Model, audiopath, language string, threads uint) (api.Result, error) {
-res := api.Result{}
+func Transcript(model whisper.Model, audiopath, language string, threads uint) (schema.Result, error) {
+res := schema.Result{}
dir, err := os.MkdirTemp("", "whisper")
if err != nil {
@@ -90,7 +90,7 @@ func Transcript(model whisper.Model, audiopath, language string, threads uint) (
tokens = append(tokens, t.Id)
}
-segment := api.Segment{Id: s.Num, Text: s.Text, Start: s.Start, End: s.End, Tokens: tokens}
+segment := schema.Segment{Id: s.Num, Text: s.Text, Start: s.Start, End: s.End, Tokens: tokens}
res.Segments = append(res.Segments, segment)
res.Text += s.Text
@@ -4,14 +4,13 @@ package transcribe
// It is meant to be used by the main executable that is the server for the specific backend type (falcon, gpt3, etc)
import (
"github.com/ggerganov/whisper.cpp/bindings/go/pkg/whisper"
"github.com/go-skynet/LocalAI/api/schema"
"github.com/go-skynet/LocalAI/pkg/grpc/base"
pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
whisperutil "github.com/go-skynet/LocalAI/pkg/grpc/whisper"
"github.com/go-skynet/LocalAI/pkg/grpc/whisper/api"
)
type Whisper struct {
-base.Base
+base.SingleThread
whisper whisper.Model
}
@@ -22,6 +21,6 @@ func (sd *Whisper) Load(opts *pb.ModelOptions) error {
return err
}
-func (sd *Whisper) AudioTranscription(opts *pb.TranscriptRequest) (api.Result, error) {
-return whisperutil.Transcript(sd.whisper, opts.Dst, opts.Language, uint(opts.Threads))
+func (sd *Whisper) AudioTranscription(opts *pb.TranscriptRequest) (schema.Result, error) {
+return Transcript(sd.whisper, opts.Dst, opts.Language, uint(opts.Threads))
}
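With the whisper glue now grouped into a single transcribe package, the backend calls Transcript directly instead of going through the old whisperutil indirection. A hypothetical caller sketch, assuming a loaded whisper.Model named m (path, language, and thread count are made-up values):

res, err := Transcript(m, "/tmp/audio.wav", "en", 4)
if err != nil {
	return err
}
fmt.Println(res.Text) // whole transcription; res.Segments carries per-segment ids, timings, and tokens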
@@ -13,7 +13,7 @@ import (
)
type Piper struct {
-base.Base
+base.SingleThread
piper *PiperB
}
@@ -5,34 +5,32 @@ package base
import (
"fmt"
"os"
"sync"
"github.com/go-skynet/LocalAI/api/schema"
pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
"github.com/go-skynet/LocalAI/pkg/grpc/whisper/api"
gopsutil "github.com/shirou/gopsutil/v3/process"
)
// Base is a base class for all backends to implement
+// Note: backends that do not support multiple requests
+// should use SingleThread instead
type Base struct {
-backendBusy sync.Mutex
-State       pb.StatusResponse_State
}
-func (llm *Base) Busy() bool {
-r := llm.backendBusy.TryLock()
-if r {
-llm.backendBusy.Unlock()
-}
-return r
+func (llm *Base) Locking() bool {
+return false
}
func (llm *Base) Lock() {
-llm.backendBusy.Lock()
-llm.State = pb.StatusResponse_BUSY
+panic("not implemented")
}
func (llm *Base) Unlock() {
-llm.State = pb.StatusResponse_READY
-llm.backendBusy.Unlock()
+panic("not implemented")
}
+func (llm *Base) Busy() bool {
+return false
+}
func (llm *Base) Load(opts *pb.ModelOptions) error {
@@ -55,8 +53,8 @@ func (llm *Base) GenerateImage(*pb.GenerateImageRequest) error {
return fmt.Errorf("unimplemented")
}
-func (llm *Base) AudioTranscription(*pb.TranscriptRequest) (api.Result, error) {
-return api.Result{}, fmt.Errorf("unimplemented")
+func (llm *Base) AudioTranscription(*pb.TranscriptRequest) (schema.Result, error) {
+return schema.Result{}, fmt.Errorf("unimplemented")
}
func (llm *Base) TTS(*pb.TTSRequest) error {
@@ -69,7 +67,12 @@ func (llm *Base) TokenizeString(opts *pb.PredictOptions) (pb.TokenizationRespons
// backends may wish to call this to capture the gopsutil info, then enhance with additional memory usage details?
func (llm *Base) Status() (pb.StatusResponse, error) {
+return pb.StatusResponse{
+Memory: memoryUsage(),
+}, nil
+}
+func memoryUsage() *pb.MemoryUsageData {
mud := pb.MemoryUsageData{
Breakdown: make(map[string]uint64),
}
@@ -85,9 +88,5 @@ func (llm *Base) Status() (pb.StatusResponse, error) {
mud.Breakdown["gopsutil-RSS"] = memInfo.RSS
}
}
-return pb.StatusResponse{
-State:  llm.State,
-Memory: &mud,
-}, nil
+return &mud
}
@@ -0,0 +1,52 @@
package base
import (
"sync"
pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
)
// SingleThread is for backends that do not support multiple requests.
// Only one request will be served at a time.
// This is useful for models that are not thread safe and cannot run
// multiple requests at the same time.
type SingleThread struct {
Base
backendBusy sync.Mutex
}
// Locking returns true if the backend needs to lock resources
func (llm *SingleThread) Locking() bool {
return true
}
func (llm *SingleThread) Lock() {
llm.backendBusy.Lock()
}
func (llm *SingleThread) Unlock() {
llm.backendBusy.Unlock()
}
func (llm *SingleThread) Busy() bool {
r := llm.backendBusy.TryLock()
if r {
llm.backendBusy.Unlock()
}
return r
}
// backends may wish to call this to capture the gopsutil info, then enhance with additional memory usage details?
func (llm *SingleThread) Status() (pb.StatusResponse, error) {
mud := memoryUsage()
state := pb.StatusResponse_READY
if llm.Busy() {
state = pb.StatusResponse_BUSY
}
return pb.StatusResponse{
State: state,
Memory: mud,
}, nil
}
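Busy() is a non-blocking probe: TryLock either acquires the mutex (the backend is idle, so release it and report false) or fails (a request holds it, so report true). Status() builds on that, so a single-threaded backend reports BUSY exactly while a request is in flight. A small caller sketch, assuming a value that embeds SingleThread:

var st base.SingleThread // in practice, a backend type embedding SingleThread

status, _ := st.Status()
if status.State == pb.StatusResponse_BUSY {
	// a request is currently being served; wait or report upstream
}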
@@ -7,8 +7,8 @@ import (
"sync"
"time"
"github.com/go-skynet/LocalAI/api/schema"
pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
"github.com/go-skynet/LocalAI/pkg/grpc/whisper/api"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
)
@@ -158,7 +158,7 @@ func (c *Client) TTS(ctx context.Context, in *pb.TTSRequest, opts ...grpc.CallOp
return client.TTS(ctx, in, opts...)
}
-func (c *Client) AudioTranscription(ctx context.Context, in *pb.TranscriptRequest, opts ...grpc.CallOption) (*api.Result, error) {
+func (c *Client) AudioTranscription(ctx context.Context, in *pb.TranscriptRequest, opts ...grpc.CallOption) (*schema.Result, error) {
c.setBusy(true)
defer c.setBusy(false)
conn, err := grpc.Dial(c.address, grpc.WithTransportCredentials(insecure.NewCredentials()))
@@ -171,14 +171,14 @@ func (c *Client) AudioTranscription(ctx context.Context, in *pb.TranscriptReques
if err != nil {
return nil, err
}
tresult := &api.Result{}
tresult := &schema.Result{}
for _, s := range res.Segments {
tks := []int{}
for _, t := range s.Tokens {
tks = append(tks, int(t))
}
tresult.Segments = append(tresult.Segments,
-api.Segment{
+schema.Segment{
Text: s.Text,
Id: int(s.Id),
Start: time.Duration(s.Start),
@@ -1,18 +1,21 @@
package grpc
import (
"github.com/go-skynet/LocalAI/api/schema"
pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
"github.com/go-skynet/LocalAI/pkg/grpc/whisper/api"
)
type LLM interface {
Busy() bool
+Lock()
+Unlock()
+Locking() bool
Predict(*pb.PredictOptions) (string, error)
PredictStream(*pb.PredictOptions, chan string) error
Load(*pb.ModelOptions) error
Embeddings(*pb.PredictOptions) ([]float32, error)
GenerateImage(*pb.GenerateImageRequest) error
-AudioTranscription(*pb.TranscriptRequest) (api.Result, error)
+AudioTranscription(*pb.TranscriptRequest) (schema.Result, error)
TTS(*pb.TTSRequest) error
TokenizeString(*pb.PredictOptions) (pb.TokenizationResponse, error)
Status() (pb.StatusResponse, error)
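Because Lock, Unlock, and Locking are now part of the interface, every backend picks the methods up by embedding base.Base or base.SingleThread. A compile-time assertion, not part of this commit but a common Go idiom, would catch a backend that misses one; sketched here for the bert backend shown above:

// In the bert backend's package:
var _ grpc.LLM = (*Embeddings)(nil) // fails to compile if a method is missing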
@@ -30,6 +30,10 @@ func (s *server) Health(ctx context.Context, in *pb.HealthMessage) (*pb.Reply, e
}
func (s *server) Embedding(ctx context.Context, in *pb.PredictOptions) (*pb.EmbeddingResult, error) {
+if s.llm.Locking() {
+s.llm.Lock()
+defer s.llm.Unlock()
+}
embeds, err := s.llm.Embeddings(in)
if err != nil {
return nil, err
@@ -39,6 +43,10 @@ func (s *server) Embedding(ctx context.Context, in *pb.PredictOptions) (*pb.Embe
}
func (s *server) LoadModel(ctx context.Context, in *pb.ModelOptions) (*pb.Result, error) {
+if s.llm.Locking() {
+s.llm.Lock()
+defer s.llm.Unlock()
+}
err := s.llm.Load(in)
if err != nil {
return &pb.Result{Message: fmt.Sprintf("Error loading model: %s", err.Error()), Success: false}, err
@@ -47,11 +55,19 @@ func (s *server) LoadModel(ctx context.Context, in *pb.ModelOptions) (*pb.Result
}
func (s *server) Predict(ctx context.Context, in *pb.PredictOptions) (*pb.Reply, error) {
+if s.llm.Locking() {
+s.llm.Lock()
+defer s.llm.Unlock()
+}
result, err := s.llm.Predict(in)
return newReply(result), err
}
func (s *server) GenerateImage(ctx context.Context, in *pb.GenerateImageRequest) (*pb.Result, error) {
+if s.llm.Locking() {
+s.llm.Lock()
+defer s.llm.Unlock()
+}
err := s.llm.GenerateImage(in)
if err != nil {
return &pb.Result{Message: fmt.Sprintf("Error generating image: %s", err.Error()), Success: false}, err
@@ -60,6 +76,10 @@ func (s *server) GenerateImage(ctx context.Context, in *pb.GenerateImageRequest)
}
func (s *server) TTS(ctx context.Context, in *pb.TTSRequest) (*pb.Result, error) {
+if s.llm.Locking() {
+s.llm.Lock()
+defer s.llm.Unlock()
+}
err := s.llm.TTS(in)
if err != nil {
return &pb.Result{Message: fmt.Sprintf("Error generating audio: %s", err.Error()), Success: false}, err
@@ -68,6 +88,10 @@ func (s *server) TTS(ctx context.Context, in *pb.TTSRequest) (*pb.Result, error)
}
func (s *server) AudioTranscription(ctx context.Context, in *pb.TranscriptRequest) (*pb.TranscriptResult, error) {
+if s.llm.Locking() {
+s.llm.Lock()
+defer s.llm.Unlock()
+}
result, err := s.llm.AudioTranscription(in)
if err != nil {
return nil, err
@@ -93,7 +117,10 @@ func (s *server) AudioTranscription(ctx context.Context, in *pb.TranscriptReques
}
func (s *server) PredictStream(in *pb.PredictOptions, stream pb.Backend_PredictStreamServer) error {
+if s.llm.Locking() {
+s.llm.Lock()
+defer s.llm.Unlock()
+}
resultChan := make(chan string)
done := make(chan bool)
@@ -111,6 +138,10 @@ func (s *server) PredictStream(in *pb.PredictOptions, stream pb.Backend_PredictS
}
func (s *server) TokenizeString(ctx context.Context, in *pb.PredictOptions) (*pb.TokenizationResponse, error) {
+if s.llm.Locking() {
+s.llm.Lock()
+defer s.llm.Unlock()
+}
res, err := s.llm.TokenizeString(in)
if err != nil {
return nil, err
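Every handler now opens with the same four-line gate, so the lock is held for exactly the duration of one RPC and only for backends that ask for it. The repetition could be factored into a helper along these lines (a sketch, not part of this commit):

// withBackendLock serializes fn only for backends that declare
// Locking() == true (i.e. those embedding base.SingleThread).
func withBackendLock(llm LLM, fn func() error) error {
	if llm.Locking() {
		llm.Lock()
		defer llm.Unlock()
	}
	return fn()
}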
@@ -1,16 +0,0 @@
package api
import "time"
type Segment struct {
Id int `json:"id"`
Start time.Duration `json:"start"`
End time.Duration `json:"end"`
Text string `json:"text"`
Tokens []int `json:"tokens"`
}
type Result struct {
Segments []Segment `json:"segments"`
Text string `json:"text"`
}