mirror of
https://github.com/mudler/LocalAI.git
synced 2026-01-07 19:20:04 -06:00
* feat(loader): refactor single active backend support to LRU This changeset introduces LRU management of loaded backends. Users can now set a maximum number of models to be loaded concurrently, and, when setting LocalAI in single active backend mode, we set the LRU size to 1 for backward compatibility. Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * chore: add tests Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * Update docs Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * Fixups Signed-off-by: Ettore Di Giacinto <mudler@localai.io> --------- Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
34 lines
685 B
Go
34 lines
685 B
Go
package backend
|
|
|
|
import (
|
|
"context"
|
|
"fmt"
|
|
|
|
"github.com/mudler/LocalAI/core/config"
|
|
"github.com/mudler/LocalAI/pkg/grpc/proto"
|
|
"github.com/mudler/LocalAI/pkg/model"
|
|
)
|
|
|
|
func Detection(
|
|
sourceFile string,
|
|
loader *model.ModelLoader,
|
|
appConfig *config.ApplicationConfig,
|
|
modelConfig config.ModelConfig,
|
|
) (*proto.DetectResponse, error) {
|
|
opts := ModelOptions(modelConfig, appConfig)
|
|
detectionModel, err := loader.Load(opts...)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
if detectionModel == nil {
|
|
return nil, fmt.Errorf("could not load detection model")
|
|
}
|
|
|
|
res, err := detectionModel.Detect(context.Background(), &proto.DetectOptions{
|
|
Src: sourceFile,
|
|
})
|
|
|
|
return res, err
|
|
}
|