feat(llama.cpp): Totally decentralized, private, distributed, p2p inference (#2343)

* feat(llama.cpp): Enable decentralized, distributed inference

As https://github.com/mudler/LocalAI/pull/2324 introduced distributed inferencing thanks to
@rgerganov's implementation in https://github.com/ggerganov/llama.cpp/pull/6829 in upstream llama.cpp, it is now
possible to distribute the workload to remote llama.cpp gRPC servers.

This changeset now uses mudler/edgevpn to establish a secure, distributed network between the nodes using a shared token.
The token is generated automatically when the server is started with the `--p2p` flag, and the workers can then be started
with `local-ai worker p2p-llama-cpp-rpc`, passing the token either via an environment variable (TOKEN) or as an argument (--token).
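
For example, the server is started with the `--p2p` flag (which generates the shared token), and each worker then joins with `TOKEN=<token> local-ai worker p2p-llama-cpp-rpc`, or equivalently with `local-ai worker p2p-llama-cpp-rpc --token <token>`.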

As per how mudler/edgevpn works, a network is established between the server and the workers using DHT and mDNS discovery protocols;
the llama.cpp RPC server is started automatically on each worker and exposed to the underlying p2p network so that the API server can connect to it.
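
As a concrete illustration of the worker side, here is a minimal sketch of announcing a local llama.cpp RPC endpoint to the network. It relies only on the `p2p.BindLLamaCPPWorker` call that appears in the diff below; the address, port, and token values are illustrative, and building it would require the `p2p` tag.

package main

import (
    "context"

    "github.com/go-skynet/LocalAI/core/p2p"
    "github.com/rs/zerolog/log"
)

func main() {
    // Illustrative values; the real worker derives these from flags/env.
    token := "shared-network-token"
    address, port := "127.0.0.1", "50052"

    // Expose the locally running llama-cpp-rpc-server to the p2p network
    // so the API server can discover and reach it.
    if err := p2p.BindLLamaCPPWorker(context.Background(), address, port, token); err != nil {
        log.Fatal().Err(err).Msg("failed to bind the llama.cpp worker to the p2p network")
    }
}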

When the HTTP server is started, it discovers the workers on the network and automatically creates the port-forwards to each service locally.
llama.cpp is then configured to use those services.
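
The consumer side boils down to handing llama.cpp a list of local endpoints, one per forwarded worker. The following is a hypothetical sketch of that last step, not LocalAI's actual wiring; it assumes the p2p layer has already created the local port-forwards and that the llama.cpp RPC backend accepts a comma-separated `host:port` list.

package main

import (
    "fmt"
    "strings"
)

// buildRPCServers joins locally forwarded worker endpoints into a
// comma-separated "host:port,host:port" list for the llama.cpp RPC backend.
func buildRPCServers(localEndpoints []string) string {
    return strings.Join(localEndpoints, ",")
}

func main() {
    // Assume one local port-forward per discovered worker (addresses are illustrative).
    forwarded := []string{"127.0.0.1:34371", "127.0.0.1:34372"}
    fmt.Println("rpc servers:", buildRPCServers(forwarded))
}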

This feature is gated behind the "p2p" GO_FLAGS build tag.

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* go mod tidy

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* ci: add p2p tag

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* better message

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
Author: Ettore Di Giacinto
Date: 2024-05-20 19:17:59 +02:00
Committed by: GitHub
Parent: 16474bfb40
Commit: fdb45153fe
17 changed files with 1243 additions and 70 deletions
@@ -0,0 +1,10 @@
package worker

type WorkerFlags struct {
    BackendAssetsPath string `env:"LOCALAI_BACKEND_ASSETS_PATH,BACKEND_ASSETS_PATH" type:"path" default:"/tmp/localai/backend_data" help:"Path used to extract libraries that are required by some of the backends in runtime" group:"storage"`
}

type Worker struct {
    P2P      P2P      `cmd:"" name:"p2p-llama-cpp-rpc" help:"Starts a LocalAI llama.cpp worker in P2P mode (requires a token)"`
    LLamaCPP LLamaCPP `cmd:"" name:"llama-cpp-rpc" help:"Starts a llama.cpp worker in standalone mode"`
}
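
For readers unfamiliar with the CLI framework, the `cmd:""` tags above turn struct fields into subcommands. A minimal, self-contained sketch of the same pattern follows, assuming github.com/alecthomas/kong as the parser; names here are illustrative, not LocalAI's actual wiring.

package main

import (
    "fmt"

    "github.com/alecthomas/kong"
)

type P2PCmd struct {
    Token string `env:"TOKEN" help:"Shared network token"`
}

func (c *P2PCmd) Run() error {
    fmt.Println("would join the p2p network with token:", c.Token)
    return nil
}

type CLI struct {
    P2P P2PCmd `cmd:"" name:"p2p-llama-cpp-rpc" help:"Start a worker in P2P mode"`
}

func main() {
    // kong.Parse selects the subcommand; ctx.Run dispatches to its Run method.
    ctx := kong.Parse(&CLI{})
    ctx.FatalIfErrorf(ctx.Run())
}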
@@ -0,0 +1,43 @@
package worker

import (
    "fmt"
    "os"
    "syscall"

    cliContext "github.com/go-skynet/LocalAI/core/cli/context"
    "github.com/go-skynet/LocalAI/pkg/assets"
    "github.com/rs/zerolog/log"
)

type LLamaCPP struct {
    Args        []string `arg:"" optional:"" name:"models" help:"Arguments to pass to the llama.cpp RPC server"`
    WorkerFlags `embed:""`
}

func (r *LLamaCPP) Run(ctx *cliContext.Context) error {
    // Extract files from the embedded FS
    err := assets.ExtractFiles(ctx.BackendAssets, r.BackendAssetsPath)
    log.Debug().Msgf("Extracting backend assets files to %s", r.BackendAssetsPath)
    if err != nil {
        log.Warn().Msgf("Failed extracting backend assets files: %s (might be required for some backends to work properly, like gpt4all)", err)
    }

    if len(os.Args) < 4 {
        return fmt.Errorf("usage: local-ai worker llama-cpp-rpc -- <llama-rpc-server-args>")
    }

    // Replace the current process with the bundled llama-cpp-rpc-server,
    // forwarding everything after the "--" separator as its arguments.
    return syscall.Exec(
        assets.ResolvePath(
            r.BackendAssetsPath,
            "util",
            "llama-cpp-rpc-server",
        ),
        append([]string{
            assets.ResolvePath(
                r.BackendAssetsPath,
                "util",
                "llama-cpp-rpc-server",
            )}, os.Args[4:]...),
        os.Environ())
}
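
Note on the standalone runner above: `syscall.Exec` replaces the current process with the bundled `llama-cpp-rpc-server`, and everything after the `--` separator on the command line (i.e. `os.Args[4:]`, given an invocation of `local-ai worker llama-cpp-rpc -- ...`) is forwarded to it unchanged.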
@@ -0,0 +1,16 @@
//go:build !p2p
// +build !p2p

package worker

import (
    "fmt"

    cliContext "github.com/go-skynet/LocalAI/core/cli/context"
)

type P2P struct{}

func (r *P2P) Run(ctx *cliContext.Context) error {
    return fmt.Errorf("p2p mode is not enabled in this build")
}
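
This stub is compiled only when the `p2p` build tag is absent (`//go:build !p2p`), so the `p2p-llama-cpp-rpc` subcommand still exists in non-p2p builds but returns an explanatory error instead of joining a network.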
@@ -0,0 +1,104 @@
//go:build p2p
// +build p2p

package worker

import (
    "context"
    "fmt"
    "os"
    "os/exec"
    "time"

    cliContext "github.com/go-skynet/LocalAI/core/cli/context"
    "github.com/go-skynet/LocalAI/core/p2p"
    "github.com/go-skynet/LocalAI/pkg/assets"
    "github.com/phayes/freeport"
    "github.com/rs/zerolog/log"
)

type P2P struct {
    WorkerFlags       `embed:""`
    Token             string   `env:"LOCALAI_TOKEN,TOKEN" help:"Token to join the p2p network"`
    NoRunner          bool     `env:"LOCALAI_NO_RUNNER,NO_RUNNER" help:"Do not start the llama-cpp-rpc-server"`
    RunnerAddress     string   `env:"LOCALAI_RUNNER_ADDRESS,RUNNER_ADDRESS" help:"Address of the llama-cpp-rpc-server"`
    RunnerPort        string   `env:"LOCALAI_RUNNER_PORT,RUNNER_PORT" help:"Port of the llama-cpp-rpc-server"`
    ExtraLLamaCPPArgs []string `env:"LOCALAI_EXTRA_LLAMA_CPP_ARGS,EXTRA_LLAMA_CPP_ARGS" help:"Extra arguments to pass to llama-cpp-rpc-server"`
}

func (r *P2P) Run(ctx *cliContext.Context) error {
    // Extract files from the embedded FS
    err := assets.ExtractFiles(ctx.BackendAssets, r.BackendAssetsPath)
    log.Debug().Msgf("Extracting backend assets files to %s", r.BackendAssetsPath)
    if err != nil {
        log.Warn().Msgf("Failed extracting backend assets files: %s (might be required for some backends to work properly, like gpt4all)", err)
    }

    // Check if the token is set, as we always need it.
    if r.Token == "" {
        return fmt.Errorf("token is required")
    }

    port, err := freeport.GetFreePort()
    if err != nil {
        return err
    }

    address := "127.0.0.1"

    if r.NoRunner {
        // Let the user override the address and port to bind when they run
        // the llama-cpp service on their own.
        p := fmt.Sprint(port)
        if r.RunnerAddress != "" {
            address = r.RunnerAddress
        }
        if r.RunnerPort != "" {
            p = r.RunnerPort
        }

        err = p2p.BindLLamaCPPWorker(context.Background(), address, p, r.Token)
        if err != nil {
            return err
        }
        log.Info().Msgf("You need to start llama-cpp-rpc-server on '%s:%s'", address, p)

        return nil
    }

    // Start llama.cpp directly from the version we have pre-packaged
    go func() {
        for {
            log.Info().Msgf("Starting llama-cpp-rpc-server on '%s:%d'", address, port)
            cmd := exec.Command(
                assets.ResolvePath(
                    r.BackendAssetsPath,
                    "util",
                    "llama-cpp-rpc-server",
                ),
                append([]string{"--host", address, "--port", fmt.Sprint(port)}, r.ExtraLLamaCPPArgs...)...,
            )

            cmd.Env = os.Environ()
            cmd.Stderr = os.Stdout
            cmd.Stdout = os.Stdout

            if err := cmd.Start(); err != nil {
                log.Error().Err(err).Msg("Failed to start llama-cpp-rpc-server")
            }

            // When the server exits, the loop restarts it.
            cmd.Wait()
        }
    }()

    // Announce the local llama.cpp RPC endpoint to the p2p network.
    err = p2p.BindLLamaCPPWorker(context.Background(), address, fmt.Sprint(port), r.Token)
    if err != nil {
        return err
    }

    // Block forever; the goroutine above keeps the rpc-server running.
    for {
        time.Sleep(1 * time.Second)
    }
}
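
A note on the structure of the P2P runner above: the goroutine acts as a simple supervisor, restarting the bundled `llama-cpp-rpc-server` whenever it exits, while `BindLLamaCPPWorker` registers the endpoint on the edgevpn network and the final loop keeps the process alive. In practice a worker exports the shared token (TOKEN / LOCALAI_TOKEN) and may pass extra llama.cpp flags through EXTRA_LLAMA_CPP_ARGS; with NO_RUNNER set, the binding still happens but starting the rpc-server is left to the user, as the log message indicates.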