# LocalAI/swagger/swagger.yaml
basePath: /
definitions:
config.Gallery:
properties:
name:
type: string
url:
type: string
type: object
functions.Function:
properties:
description:
type: string
name:
type: string
parameters:
additionalProperties: true
type: object
strict:
type: boolean
type: object
functions.Item:
properties:
properties:
additionalProperties: true
type: object
type:
type: string
type: object
functions.JSONFunctionStructure:
properties:
$defs:
additionalProperties: true
type: object
anyOf:
items:
$ref: '#/definitions/functions.Item'
type: array
oneOf:
items:
$ref: '#/definitions/functions.Item'
type: array
type: object
functions.Tool:
properties:
function:
$ref: '#/definitions/functions.Function'
type:
type: string
type: object
gallery.File:
properties:
filename:
type: string
sha256:
type: string
uri:
type: string
type: object
gallery.GalleryBackend:
properties:
alias:
type: string
capabilities:
additionalProperties:
type: string
type: object
description:
type: string
files:
description: AdditionalFiles are used to add additional files to the model
items:
$ref: '#/definitions/gallery.File'
type: array
gallery:
allOf:
- $ref: '#/definitions/config.Gallery'
description: Gallery is a reference to the gallery which contains the model
icon:
type: string
installed:
description: Installed is used to indicate if the model is installed or not
type: boolean
license:
type: string
mirrors:
items:
type: string
type: array
name:
type: string
tags:
items:
type: string
type: array
uri:
type: string
url:
type: string
urls:
items:
type: string
type: array
type: object
gallery.GalleryModel:
properties:
config_file:
additionalProperties: true
description: config_file is read when URL is blank, and therefore this is a base config.
type: object
description:
type: string
files:
description: AdditionalFiles are used to add additional files to the model
items:
$ref: '#/definitions/gallery.File'
type: array
gallery:
allOf:
- $ref: '#/definitions/config.Gallery'
description: Gallery is a reference to the gallery which contains the model
icon:
type: string
installed:
description: Installed is used to indicate if the model is installed or not
type: boolean
license:
type: string
name:
type: string
overrides:
additionalProperties: true
description: Overrides are used to override the configuration of the model
located at URL
type: object
tags:
items:
type: string
type: array
url:
type: string
urls:
items:
type: string
type: array
type: object
localai.GalleryBackend:
properties:
id:
type: string
type: object
localai.GalleryModel:
properties:
config_file:
additionalProperties: true
description: config_file is read when URL is blank, and therefore this is a base config.
type: object
description:
type: string
files:
description: AdditionalFiles are used to add additional files to the model
items:
$ref: '#/definitions/gallery.File'
type: array
gallery:
allOf:
- $ref: '#/definitions/config.Gallery'
description: Gallery is a reference to the gallery which contains the model
icon:
type: string
id:
type: string
installed:
description: Installed is used to indicate if the model is installed or not
type: boolean
license:
type: string
name:
type: string
overrides:
additionalProperties: true
description: Overrides are used to override the configuration of the model
located at URL
type: object
tags:
items:
type: string
type: array
url:
type: string
urls:
items:
type: string
type: array
type: object
proto.MemoryUsageData:
properties:
breakdown:
additionalProperties:
format: int64
type: integer
type: object
total:
type: integer
type: object
proto.StatusResponse:
properties:
memory:
$ref: '#/definitions/proto.MemoryUsageData'
state:
$ref: '#/definitions/proto.StatusResponse_State'
type: object
proto.StatusResponse_State:
enum:
- 0
- 1
- 2
- -1
format: int32
type: integer
x-enum-varnames:
- StatusResponse_UNINITIALIZED
- StatusResponse_BUSY
- StatusResponse_READY
- StatusResponse_ERROR
proto.VADResponse:
properties:
segments:
items:
$ref: '#/definitions/proto.VADSegment'
type: array
type: object
proto.VADSegment:
properties:
end:
type: number
start:
type: number
type: object
schema.BackendMonitorRequest:
properties:
model:
type: string
type: object
schema.BackendResponse:
properties:
id:
type: string
status_url:
type: string
type: object
schema.Choice:
properties:
delta:
$ref: '#/definitions/schema.Message'
finish_reason:
type: string
index:
type: integer
message:
$ref: '#/definitions/schema.Message'
text:
type: string
type: object
schema.Detection:
properties:
class_name:
type: string
height:
type: number
width:
type: number
x:
type: number
"y":
type: number
type: object
schema.DetectionRequest:
properties:
image:
type: string
model:
type: string
type: object
schema.DetectionResponse:
properties:
detections:
items:
$ref: '#/definitions/schema.Detection'
type: array
type: object
schema.ElevenLabsSoundGenerationRequest:
properties:
do_sample:
type: boolean
duration_seconds:
type: number
model_id:
type: string
prompt_influence:
type: number
text:
type: string
type: object
schema.FunctionCall:
properties:
arguments:
type: string
name:
type: string
type: object
schema.GalleryResponse:
properties:
status:
type: string
uuid:
type: string
type: object
schema.Item:
properties:
b64_json:
type: string
embedding:
items:
type: number
type: array
index:
type: integer
object:
type: string
url:
description: Images
type: string
type: object
schema.JINADocumentResult:
properties:
document:
$ref: '#/definitions/schema.JINAText'
index:
type: integer
relevance_score:
type: number
type: object
schema.JINARerankRequest:
properties:
backend:
type: string
documents:
items:
type: string
type: array
model:
type: string
query:
type: string
top_n:
type: integer
type: object
schema.JINARerankResponse:
properties:
model:
type: string
results:
items:
$ref: '#/definitions/schema.JINADocumentResult'
type: array
usage:
$ref: '#/definitions/schema.JINAUsageInfo'
type: object
schema.JINAText:
properties:
text:
type: string
type: object
schema.JINAUsageInfo:
properties:
prompt_tokens:
type: integer
total_tokens:
type: integer
type: object
schema.Message:
properties:
content:
description: The message content
function_call:
description: A result of a function call
name:
description: The message name (used for tools calls)
type: string
role:
description: The message role
type: string
string_audios:
items:
type: string
type: array
string_content:
type: string
string_images:
items:
type: string
type: array
string_videos:
items:
type: string
type: array
tool_calls:
items:
$ref: '#/definitions/schema.ToolCall'
type: array
type: object
schema.ModelsDataResponse:
properties:
data:
items:
$ref: '#/definitions/schema.OpenAIModel'
type: array
object:
type: string
type: object
schema.NodeData:
properties:
id:
type: string
lastSeen:
type: string
name:
type: string
serviceID:
type: string
tunnelAddress:
type: string
type: object
schema.OpenAIModel:
properties:
id:
type: string
object:
type: string
type: object
schema.OpenAIRequest:
properties:
backend:
type: string
batch:
description: Custom parameters - not present in the OpenAI API
type: integer
clip_skip:
description: Diffusers
type: integer
echo:
type: boolean
file:
description: whisper
type: string
files:
description: Multiple input images for img2img or inpainting
items:
type: string
type: array
frequency_penalty:
type: number
function_call:
description: might be a string or an object
functions:
description: A list of available functions to call
items:
$ref: '#/definitions/functions.Function'
type: array
grammar:
description: A grammar to constrain the LLM output
type: string
grammar_json_functions:
$ref: '#/definitions/functions.JSONFunctionStructure'
ignore_eos:
type: boolean
input: {}
instruction:
description: Edit endpoint
type: string
language:
description: Also part of the OpenAI official spec
type: string
max_tokens:
type: integer
messages:
description: Messages is read only by chat/completion API calls
items:
$ref: '#/definitions/schema.Message'
type: array
metadata:
additionalProperties:
type: string
type: object
mode:
description: Image (not supported by OpenAI)
type: integer
model:
type: string
model_base_name:
type: string
"n":
description: Also part of the OpenAI official spec; use it to return multiple results
type: integer
n_keep:
type: integer
negative_prompt:
type: string
negative_prompt_scale:
type: number
presence_penalty:
type: number
prompt:
description: Prompt is read only by completion/image API calls
quality:
type: string
reasoning_effort:
type: string
ref_images:
description: Reference images for models that support them (e.g., Flux Kontext)
items:
type: string
type: array
repeat_last_n:
type: integer
repeat_penalty:
type: number
response_format:
description: whisper/image
rope_freq_base:
type: number
rope_freq_scale:
type: number
seed:
type: integer
size:
description: image
type: string
step:
type: integer
stop: {}
stream:
type: boolean
temperature:
type: number
tfz:
type: number
tokenizer:
description: RWKV (?)
type: string
tool_choice: {}
tools:
items:
$ref: '#/definitions/functions.Tool'
type: array
top_k:
type: integer
top_p:
description: Common options between all the API calls, part of the OpenAI
spec
type: number
translate:
description: Only for audio transcription
type: boolean
typical_p:
type: number
required:
- file
type: object
schema.OpenAIResponse:
properties:
choices:
items:
$ref: '#/definitions/schema.Choice'
type: array
created:
type: integer
data:
items:
$ref: '#/definitions/schema.Item'
type: array
id:
type: string
model:
type: string
object:
type: string
usage:
$ref: '#/definitions/schema.OpenAIUsage'
type: object
schema.OpenAIUsage:
properties:
completion_tokens:
type: integer
prompt_tokens:
type: integer
timing_prompt_processing:
description: Extra timing data, disabled by default as it's not part of the OpenAI specification
type: number
timing_token_generation:
type: number
total_tokens:
type: integer
type: object
schema.P2PNodesResponse:
properties:
federated_nodes:
items:
$ref: '#/definitions/schema.NodeData'
type: array
nodes:
items:
$ref: '#/definitions/schema.NodeData'
type: array
type: object
schema.SysInfoModel:
properties:
id:
type: string
type: object
schema.SystemInformationResponse:
properties:
backends:
items:
type: string
type: array
loaded_models:
items:
$ref: '#/definitions/schema.SysInfoModel'
type: array
type: object
schema.TTSRequest:
description: TTS request body
properties:
backend:
type: string
input:
description: text input
type: string
language:
description: (optional) language to use with TTS model
type: string
model:
type: string
response_format:
description: (optional) output format
type: string
voice:
description: voice audio file or speaker id
type: string
type: object
schema.TokenizeRequest:
properties:
content:
type: string
model:
type: string
type: object
schema.TokenizeResponse:
properties:
tokens:
items:
type: integer
type: array
type: object
schema.ToolCall:
properties:
function:
$ref: '#/definitions/schema.FunctionCall'
id:
type: string
index:
type: integer
type:
type: string
type: object
schema.VADRequest:
description: VAD request body
properties:
audio:
items:
type: number
type: array
model:
description: model name or full path
type: string
type: object
schema.VideoRequest:
properties:
cfg_scale:
type: number
end_image:
type: string
fps:
type: integer
height:
type: integer
model:
type: string
negative_prompt:
type: string
num_frames:
type: integer
prompt:
type: string
response_format:
type: string
seed:
type: integer
start_image:
type: string
step:
type: integer
width:
type: integer
type: object
services.GalleryOpStatus:
properties:
deletion:
description: Deletion is true if the operation is a deletion
type: boolean
downloaded_size:
type: string
error: {}
file_name:
type: string
file_size:
type: string
gallery_element_name:
type: string
message:
type: string
processed:
type: boolean
progress:
type: number
type: object
info:
contact:
name: LocalAI
url: https://localai.io
description: The LocalAI REST API.
license:
name: MIT
url: https://raw.githubusercontent.com/mudler/LocalAI/master/LICENSE
title: LocalAI API
version: 2.0.0
paths:
/api/p2p:
get:
responses:
"200":
description: Response
schema:
items:
$ref: '#/definitions/schema.P2PNodesResponse'
type: array
summary: Returns available P2P nodes
/api/p2p/token:
get:
responses:
"200":
description: Response
schema:
type: string
summary: Show the P2P token
/backend/monitor:
get:
parameters:
- description: Backend statistics request
in: body
name: request
required: true
schema:
$ref: '#/definitions/schema.BackendMonitorRequest'
responses:
"200":
description: Response
schema:
$ref: '#/definitions/proto.StatusResponse'
summary: Backend monitor endpoint
/backend/shutdown:
post:
parameters:
- description: Backend shutdown request
in: body
name: request
required: true
schema:
$ref: '#/definitions/schema.BackendMonitorRequest'
responses: {}
summary: Backend shutdown endpoint
/backends:
get:
responses:
"200":
description: Response
schema:
items:
$ref: '#/definitions/gallery.GalleryBackend'
type: array
summary: List all Backends
/backends/apply:
post:
parameters:
- description: query params
in: body
name: request
required: true
schema:
$ref: '#/definitions/localai.GalleryBackend'
responses:
"200":
description: Response
schema:
$ref: '#/definitions/schema.BackendResponse'
summary: Install backends to LocalAI.
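# Example (illustrative, not part of the generated spec): installing a backend by id,
# assuming a local instance at http://localhost:8080; "<backend-id>" is a placeholder.
#   curl http://localhost:8080/backends/apply \
#     -H "Content-Type: application/json" \
#     -d '{"id": "<backend-id>"}'
# The call returns a schema.BackendResponse carrying an id and a status_url.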
/backends/available:
get:
responses:
"200":
description: Response
schema:
items:
$ref: '#/definitions/gallery.GalleryBackend'
type: array
summary: List all available Backends
/backends/delete/{name}:
post:
parameters:
- description: Backend name
in: path
name: name
required: true
type: string
responses:
"200":
description: Response
schema:
$ref: '#/definitions/schema.BackendResponse'
summary: Delete backends from LocalAI.
/backends/galleries:
get:
responses:
"200":
description: Response
schema:
items:
$ref: '#/definitions/config.Gallery'
type: array
summary: List all Galleries
/backends/jobs:
get:
responses:
"200":
description: Response
schema:
additionalProperties:
$ref: '#/definitions/services.GalleryOpStatus'
type: object
summary: Returns the status and progress of all jobs
/backends/jobs/{uuid}:
get:
responses:
"200":
description: Response
schema:
$ref: '#/definitions/services.GalleryOpStatus'
summary: Returns the job status
/metrics:
get:
parameters:
- description: Gallery details
in: body
name: request
required: true
schema:
$ref: '#/definitions/config.Gallery'
responses: {}
summary: Prometheus metrics endpoint
/models/apply:
post:
parameters:
- description: query params
in: body
name: request
required: true
schema:
$ref: '#/definitions/localai.GalleryModel'
responses:
"200":
description: Response
schema:
$ref: '#/definitions/schema.GalleryResponse'
summary: Install models to LocalAI.
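# Example (illustrative): installing a gallery model by id, assuming a local instance at
# http://localhost:8080; the "<gallery>@<model-name>" value is a placeholder.
#   curl http://localhost:8080/models/apply \
#     -H "Content-Type: application/json" \
#     -d '{"id": "<gallery>@<model-name>"}'
# The returned schema.GalleryResponse uuid can be polled via /models/jobs/{uuid}.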
/models/available:
get:
responses:
"200":
description: Response
schema:
items:
$ref: '#/definitions/gallery.GalleryModel'
type: array
summary: List installable models.
/models/delete/{name}:
post:
parameters:
- description: Model name
in: path
name: name
required: true
type: string
responses:
"200":
description: Response
schema:
$ref: '#/definitions/schema.GalleryResponse'
summary: Delete models from LocalAI.
/models/galleries:
get:
responses:
"200":
description: Response
schema:
items:
$ref: '#/definitions/config.Gallery'
type: array
summary: List all Galleries
/models/jobs:
get:
responses:
"200":
description: Response
schema:
additionalProperties:
$ref: '#/definitions/services.GalleryOpStatus'
type: object
summary: Returns the status and progress of all jobs
/models/jobs/{uuid}:
get:
responses:
"200":
description: Response
schema:
$ref: '#/definitions/services.GalleryOpStatus'
summary: Returns the job status
/system:
get:
responses:
"200":
description: Response
schema:
$ref: '#/definitions/schema.SystemInformationResponse'
summary: Show the LocalAI instance information
/tokenMetrics:
get:
consumes:
- application/json
produces:
- audio/x-wav
responses:
"200":
description: generated audio/wav file
schema:
type: string
summary: Get TokenMetrics for Active Slot.
/tts:
post:
consumes:
- application/json
parameters:
- description: query params
in: body
name: request
required: true
schema:
$ref: '#/definitions/schema.TTSRequest'
produces:
- audio/x-wav
responses:
"200":
description: generated audio/wav file
schema:
type: string
summary: Generates audio from the input text.
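# Example (illustrative): a minimal TTS request, assuming http://localhost:8080 and a
# placeholder model name; the response body is the generated audio file.
#   curl http://localhost:8080/tts \
#     -H "Content-Type: application/json" \
#     -d '{"model": "<tts-model>", "input": "Hello from LocalAI"}' \
#     -o out.wav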
/v1/audio/speech:
post:
consumes:
- application/json
parameters:
- description: query params
in: body
name: request
required: true
schema:
$ref: '#/definitions/schema.TTSRequest'
produces:
- audio/x-wav
responses:
"200":
description: generated audio/wav file
schema:
type: string
summary: Generates audio from the input text.
/v1/audio/transcriptions:
post:
consumes:
- multipart/form-data
parameters:
- description: model
in: formData
name: model
required: true
type: string
- description: file
in: formData
name: file
required: true
type: file
responses:
"200":
description: Response
schema:
additionalProperties:
type: string
type: object
summary: Transcribes audio into the input language.
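# Example (illustrative): a multipart transcription request, assuming http://localhost:8080
# and a placeholder whisper-style model name; audio.wav is any local audio file.
#   curl http://localhost:8080/v1/audio/transcriptions \
#     -F model="<whisper-model>" \
#     -F file=@audio.wav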
/v1/chat/completions:
post:
parameters:
- description: query params
in: body
name: request
required: true
schema:
$ref: '#/definitions/schema.OpenAIRequest'
responses:
"200":
description: Response
schema:
$ref: '#/definitions/schema.OpenAIResponse'
summary: Generate chat completions for a given prompt and model.
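# Example (illustrative): a minimal chat completion, assuming http://localhost:8080 and a
# placeholder model name; the reply is a schema.OpenAIResponse with choices[].message.
#   curl http://localhost:8080/v1/chat/completions \
#     -H "Content-Type: application/json" \
#     -d '{"model": "<model>", "messages": [{"role": "user", "content": "Hello"}]}'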
/v1/completions:
post:
parameters:
- description: query params
in: body
name: request
required: true
schema:
$ref: '#/definitions/schema.OpenAIRequest'
responses:
"200":
description: Response
schema:
$ref: '#/definitions/schema.OpenAIResponse'
summary: Generate completions for a given prompt and model.
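# Example (illustrative): a plain text completion with the same assumptions as the chat
# example above; max_tokens is an arbitrary sample value.
#   curl http://localhost:8080/v1/completions \
#     -H "Content-Type: application/json" \
#     -d '{"model": "<model>", "prompt": "Once upon a time", "max_tokens": 64}'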
/v1/detection:
post:
parameters:
- description: query params
in: body
name: request
required: true
schema:
$ref: '#/definitions/schema.DetectionRequest'
responses:
"200":
description: Response
schema:
$ref: '#/definitions/schema.DetectionResponse'
summary: Detects objects in the input image.
/v1/edits:
post:
parameters:
- description: query params
in: body
name: request
required: true
schema:
$ref: '#/definitions/schema.OpenAIRequest'
responses:
"200":
description: Response
schema:
$ref: '#/definitions/schema.OpenAIResponse'
summary: OpenAI edit endpoint
/v1/embeddings:
post:
parameters:
- description: query params
in: body
name: request
required: true
schema:
$ref: '#/definitions/schema.OpenAIRequest'
responses:
"200":
description: Response
schema:
$ref: '#/definitions/schema.OpenAIResponse'
summary: Get a vector representation of a given input that can be easily consumed
by machine learning models and algorithms.
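# Example (illustrative): requesting embeddings, assuming http://localhost:8080 and a
# placeholder embedding model; per the OpenAI-style API, "input" may be a string or a list.
#   curl http://localhost:8080/v1/embeddings \
#     -H "Content-Type: application/json" \
#     -d '{"model": "<embedding-model>", "input": "The quick brown fox"}'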
/v1/images/generations:
post:
parameters:
- description: query params
in: body
name: request
required: true
schema:
$ref: '#/definitions/schema.OpenAIRequest'
responses:
"200":
description: Response
schema:
$ref: '#/definitions/schema.OpenAIResponse'
summary: Creates an image given a prompt.
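# Example (illustrative): generating an image, assuming http://localhost:8080 and a
# placeholder image model; "size" uses the usual "WIDTHxHEIGHT" string.
#   curl http://localhost:8080/v1/images/generations \
#     -H "Content-Type: application/json" \
#     -d '{"model": "<image-model>", "prompt": "a cat wearing sunglasses", "size": "512x512"}'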
/v1/models:
get:
responses:
"200":
description: Response
schema:
$ref: '#/definitions/schema.ModelsDataResponse'
summary: List and describe the various models available in the API.
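# Example (illustrative): listing available models, assuming http://localhost:8080.
#   curl http://localhost:8080/v1/models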
/v1/rerank:
post:
parameters:
- description: query params
in: body
name: request
required: true
schema:
$ref: '#/definitions/schema.JINARerankRequest'
responses:
"200":
description: Response
schema:
$ref: '#/definitions/schema.JINARerankResponse'
summary: Reranks a list of phrases by relevance to a given text query.
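# Example (illustrative): reranking documents against a query, assuming http://localhost:8080
# and a placeholder reranker model.
#   curl http://localhost:8080/v1/rerank \
#     -H "Content-Type: application/json" \
#     -d '{"model": "<reranker>", "query": "organic skincare", "documents": ["doc one", "doc two"], "top_n": 2}'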
/v1/sound-generation:
post:
parameters:
- description: query params
in: body
name: request
required: true
schema:
$ref: '#/definitions/schema.ElevenLabsSoundGenerationRequest'
responses:
"200":
description: Response
schema:
type: string
summary: Generates audio from the input text.
/v1/text-to-speech/{voice-id}:
post:
parameters:
- description: Voice ID
in: path
name: voice-id
required: true
type: string
- description: query params
in: body
name: request
required: true
schema:
$ref: '#/definitions/schema.TTSRequest'
responses:
"200":
description: Response
schema:
type: string
summary: Generates audio from the input text.
/v1/tokenMetrics:
get:
consumes:
- application/json
produces:
- audio/x-wav
responses:
"200":
description: generated audio/wav file
schema:
type: string
summary: Get TokenMetrics for Active Slot.
/v1/tokenize:
post:
parameters:
- description: Request
in: body
name: request
required: true
schema:
$ref: '#/definitions/schema.TokenizeRequest'
responses:
"200":
description: Response
schema:
$ref: '#/definitions/schema.TokenizeResponse'
summary: Tokenize the input.
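# Example (illustrative): tokenizing a string, assuming http://localhost:8080 and a
# placeholder model name; the response is a schema.TokenizeResponse with integer tokens.
#   curl http://localhost:8080/v1/tokenize \
#     -H "Content-Type: application/json" \
#     -d '{"model": "<model>", "content": "Hello world"}'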
/vad:
post:
consumes:
- application/json
parameters:
- description: query params
in: body
name: request
required: true
schema:
$ref: '#/definitions/schema.VADRequest'
responses:
"200":
description: Response
schema:
$ref: '#/definitions/proto.VADResponse'
summary: Detect voice fragments in an audio stream
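# Example (illustrative): voice activity detection over raw float samples, assuming
# http://localhost:8080 and a placeholder VAD model; the audio array is truncated here.
#   curl http://localhost:8080/vad \
#     -H "Content-Type: application/json" \
#     -d '{"model": "<vad-model>", "audio": [0.0, 0.01, 0.02]}'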
/video:
post:
parameters:
- description: query params
in: body
name: request
required: true
schema:
$ref: '#/definitions/schema.VideoRequest'
responses:
"200":
description: Response
schema:
$ref: '#/definitions/schema.OpenAIResponse'
summary: Creates a video given a prompt.
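# Example (illustrative): generating a short clip, assuming http://localhost:8080 and a
# placeholder video model; num_frames and fps are arbitrary sample values.
#   curl http://localhost:8080/video \
#     -H "Content-Type: application/json" \
#     -d '{"model": "<video-model>", "prompt": "a boat on a calm lake", "num_frames": 16, "fps": 8}'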
securityDefinitions:
BearerAuth:
in: header
name: Authorization
type: apiKey
swagger: "2.0"