From 36c373b7c9d61b0ddde08be5e95041014b4b7842 Mon Sep 17 00:00:00 2001
From: Ettore Di Giacinto
Date: Mon, 22 Sep 2025 10:33:26 +0200
Subject: [PATCH] feat(kokoro): add support for l4t devices (#6322)

Signed-off-by: Ettore Di Giacinto
---
 .github/workflows/backend.yml              | 12 ++++++++++++
 backend/index.yaml                         | 12 ++++++++++++
 backend/python/kokoro/requirements-l4t.txt |  7 +++++++
 3 files changed, 31 insertions(+)
 create mode 100644 backend/python/kokoro/requirements-l4t.txt

diff --git a/.github/workflows/backend.yml b/.github/workflows/backend.yml
index 70adcafcf..b303de1f4 100644
--- a/.github/workflows/backend.yml
+++ b/.github/workflows/backend.yml
@@ -489,6 +489,18 @@ jobs:
             backend: "diffusers"
             dockerfile: "./backend/Dockerfile.python"
             context: "./backend"
+          - build-type: 'l4t'
+            cuda-major-version: "12"
+            cuda-minor-version: "0"
+            platforms: 'linux/arm64'
+            tag-latest: 'auto'
+            tag-suffix: '-gpu-nvidia-l4t-kokoro'
+            runs-on: 'ubuntu-24.04-arm'
+            base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
+            skip-drivers: 'true'
+            backend: "kokoro"
+            dockerfile: "./backend/Dockerfile.python"
+            context: "./backend"
           # SYCL additional backends
           - build-type: 'intel'
             cuda-major-version: ""
diff --git a/backend/index.yaml b/backend/index.yaml
index fbf4ab3ea..e078391d6 100644
--- a/backend/index.yaml
+++ b/backend/index.yaml
@@ -270,6 +270,7 @@
     nvidia: "cuda12-kokoro"
     intel: "intel-kokoro"
     amd: "rocm-kokoro"
+    nvidia-l4t: "nvidia-l4t-kokoro"
 - &coqui
   urls:
     - https://github.com/idiap/coqui-ai-TTS
@@ -1049,6 +1050,7 @@
     nvidia: "cuda12-kokoro-development"
     intel: "intel-kokoro-development"
     amd: "rocm-kokoro-development"
+    nvidia-l4t: "nvidia-l4t-kokoro-development"
 - !!merge <<: *kokoro
   name: "cuda11-kokoro-development"
   uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-kokoro"
@@ -1074,6 +1076,16 @@
   uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-kokoro"
   mirrors:
     - localai/localai-backends:master-gpu-intel-kokoro
+- !!merge <<: *kokoro
+  name: "nvidia-l4t-kokoro"
+  uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-l4t-kokoro"
+  mirrors:
+    - localai/localai-backends:latest-gpu-nvidia-l4t-kokoro
+- !!merge <<: *kokoro
+  name: "nvidia-l4t-kokoro-development"
+  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-l4t-kokoro"
+  mirrors:
+    - localai/localai-backends:master-gpu-nvidia-l4t-kokoro
 - !!merge <<: *kokoro
   name: "cuda11-kokoro"
   uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-kokoro"
diff --git a/backend/python/kokoro/requirements-l4t.txt b/backend/python/kokoro/requirements-l4t.txt
new file mode 100644
index 000000000..c03f853de
--- /dev/null
+++ b/backend/python/kokoro/requirements-l4t.txt
@@ -0,0 +1,7 @@
+--extra-index-url https://pypi.jetson-ai-lab.io/jp6/cu126/
+torch
+torchaudio
+transformers
+accelerate
+kokoro
+soundfile
\ No newline at end of file