feat: add OpenAI Compatible LLM provider for local models (#81)

Support Ollama, LM Studio, vLLM, and any OpenAI-compatible API
  via a configurable base URL. Reuse ChatOpenAI with a custom
  baseURL, so no new dependencies are needed. Local models use
  direct JSON parsing instead of withStructuredOutput, since many
  don't support function calling.

Co-authored-by: FasterOP <7832832+mmplisskin@users.noreply.github.com>
Author: MaxP
Date: 2026-03-31 01:43:58 -07:00 (committed by GitHub)
parent 94a94dbb34
commit 8bcbf403d5
8 changed files with 109 additions and 19 deletions
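For orientation, here is a minimal sketch of the config shape this change enables, mirroring the LLMConfig interface diffed below. The model name and URL are illustrative values for an Ollama setup, not anything this commit pins down.

    // Hypothetical example config for a local Ollama server; the model
    // name and URL are placeholders, only the field shapes come from this commit.
    type LLMProvider = "openai" | "google" | "mistral" | "openai_compatible"

    interface LLMConfig {
      provider: LLMProvider
      apiKey: string
      model: string
      baseUrl?: string // consumed only by the "openai_compatible" branch
    }

    const localConfig: LLMConfig = {
      provider: "openai_compatible",
      apiKey: "", // many local servers ignore auth; "not-needed" is substituted downstream
      model: "llama3.1",
      baseUrl: "http://localhost:11434/v1", // Ollama's OpenAI-compatible endpoint
    }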
+26 -6
@@ -3,12 +3,13 @@ import { ChatGoogleGenerativeAI } from "@langchain/google-genai"
 import { ChatMistralAI } from "@langchain/mistralai"
 import { BaseMessage, HumanMessage } from "@langchain/core/messages"
 
-export type LLMProvider = "openai" | "google" | "mistral"
+export type LLMProvider = "openai" | "google" | "mistral" | "openai_compatible"
 
 export interface LLMConfig {
   provider: LLMProvider
   apiKey: string
   model: string
+  baseUrl?: string
 }
 
 export interface LLMSettings {
@@ -50,6 +51,15 @@ async function requestLLMUnified(config: LLMConfig, req: LLMRequest): Promise<LL
       model: config.model,
       temperature: temperature,
     })
+  } else if (config.provider === "openai_compatible") {
+    model = new ChatOpenAI({
+      apiKey: config.apiKey || "not-needed",
+      model: config.model,
+      temperature: temperature,
+      configuration: {
+        baseURL: config.baseUrl?.trim(),
+      },
+    })
   } else {
     return {
       output: {},
@@ -58,8 +68,6 @@ async function requestLLMUnified(config: LLMConfig, req: LLMRequest): Promise<LL
     }
   }
 
-  const structuredModel = model.withStructuredOutput(req.schema, { name: "transaction" })
-
   let message_content: any = [{ type: "text", text: req.prompt }]
   if (req.attachments && req.attachments.length > 0) {
     const images = req.attachments.map((att) => ({
@@ -72,7 +80,15 @@ async function requestLLMUnified(config: LLMConfig, req: LLMRequest): Promise<LL
   }
 
   const messages: BaseMessage[] = [new HumanMessage({ content: message_content })]
-  const response = await structuredModel.invoke(messages)
+  let response: any
+  if (config.provider === "openai_compatible") {
+    const raw = await model.invoke(messages)
+    const text = typeof raw.content === "string" ? raw.content : raw.content.map((c: any) => c.text || "").join("")
+    response = JSON.parse(text.replace(/```(?:json)?\s*/g, "").trim())
+  } else {
+    const structuredModel = model.withStructuredOutput(req.schema, { name: "transaction" })
+    response = await structuredModel.invoke(messages)
+  }
 
   return {
     output: response,
@@ -89,8 +105,12 @@ async function requestLLMUnified(config: LLMConfig, req: LLMRequest): Promise<LL
 
 export async function requestLLM(settings: LLMSettings, req: LLMRequest): Promise<LLMResponse> {
   for (const config of settings.providers) {
-    if (!config.apiKey || !config.model) {
-      console.info("Skipping provider:", config.provider)
+    if (!config.model) {
+      console.info("Skipping provider:", config.provider, "(no model)")
       continue
     }
+    if (config.provider === "openai_compatible" ? !config.baseUrl : !config.apiKey) {
+      console.info("Skipping provider:", config.provider, "(not configured)")
+      continue
+    }
     console.info("Use provider:", config.provider)
+2 -1
@@ -41,7 +41,8 @@ export default async function UnsortedPage() {
       {config.selfHosted.isEnabled &&
         !settings.openai_api_key &&
         !settings.google_api_key &&
-        !settings.mistral_api_key && (
+        !settings.mistral_api_key &&
+        !settings.openai_compatible_base_url && (
           <Alert>
             <Settings className="h-4 w-4 mt-2" />
             <div className="flex flex-row justify-between pt-2">
+3 -1
@@ -16,7 +16,9 @@ export async function selfHostedGetStartedAction(formData: FormData) {
   const apiKeys = [
     "openai_api_key",
     "google_api_key",
-    "mistral_api_key"
+    "mistral_api_key",
+    "openai_compatible_api_key",
+    "openai_compatible_base_url",
   ]
 
   for (const key of apiKeys) {
@@ -68,6 +68,16 @@ export default function SelfHostedSetupFormClient({ defaultProvider, defaultApiK
             </a>
           </small>
         </div>
+        {selected.baseUrlName && (
+          <div>
+            <FormInput
+              title={`${selected.label} Base URL`}
+              name={selected.baseUrlName}
+              defaultValue={selected.defaultBaseUrl}
+              placeholder="http://localhost:11434/v1"
+            />
+          </div>
+        )}
         <Button type="submit" className="w-auto p-6">
           Get Started
         </Button>
+21 -8
@@ -30,7 +30,7 @@ import { PROVIDERS } from "@/lib/llm-providers";
 function getInitialProviderOrder(settings: Record<string, string>) {
   let order: string[] = []
   if (!settings.llm_providers) {
-    order = ['openai', 'google', 'mistral']
+    order = ['openai', 'google', 'mistral', 'openai_compatible']
   } else {
     order = settings.llm_providers.split(",").map(p => p.trim())
   }
@@ -51,17 +51,20 @@ export default function LLMSettingsForm({
   // Controlled values for each provider
   const [providerValues, setProviderValues] = useState(() => {
-    const values: Record<string, { apiKey: string; model: string }> = {}
+    const values: Record<string, { apiKey: string; model: string; baseUrl: string }> = {}
     PROVIDERS.forEach((provider) => {
       values[provider.key] = {
         apiKey: settings[provider.apiKeyName],
         model: settings[provider.modelName] || provider.defaultModelName,
+        baseUrl: provider.baseUrlName
+          ? (settings[provider.baseUrlName] || provider.defaultBaseUrl || "")
+          : "",
       }
     })
     return values
   })
 
-  function handleProviderValueChange(providerKey: string, field: "apiKey" | "model", value: string) {
+  function handleProviderValueChange(providerKey: string, field: "apiKey" | "model" | "baseUrl", value: string) {
     setProviderValues((prev) => ({
       ...prev,
       [providerKey]: {
@@ -141,8 +144,8 @@
 type DndProviderBlocksProps = {
   providerOrder: string[];
   setProviderOrder: React.Dispatch<React.SetStateAction<string[]>>;
-  providerValues: Record<string, { apiKey: string; model: string }>;
-  handleProviderValueChange: (providerKey: string, field: "apiKey" | "model", value: string) => void;
+  providerValues: Record<string, { apiKey: string; model: string; baseUrl: string }>;
+  handleProviderValueChange: (providerKey: string, field: "apiKey" | "model" | "baseUrl", value: string) => void;
 };
 
 function DndProviderBlocks({ providerOrder, setProviderOrder, providerValues, handleProviderValueChange }: DndProviderBlocksProps) {
@@ -176,8 +179,8 @@ type SortableProviderBlockProps = {
   id: string;
   idx: number;
   providerKey: string;
-  value: { apiKey: string; model: string };
-  handleValueChange: (providerKey: string, field: "apiKey" | "model", value: string) => void;
+  value: { apiKey: string; model: string; baseUrl: string };
+  handleValueChange: (providerKey: string, field: "apiKey" | "model" | "baseUrl", value: string) => void;
 };
 
 function SortableProviderBlock({ id, idx, providerKey, value, handleValueChange }: SortableProviderBlockProps) {
@@ -214,7 +217,7 @@ function SortableProviderBlock({ id, idx, providerKey, value, handleValueChange
           value={value.apiKey}
           onChange={e => handleValueChange(provider.key, "apiKey", e.target.value)}
           className="flex-1 border rounded px-2 py-1"
-          placeholder="API key"
+          placeholder={provider.baseUrlName ? "API key (optional)" : "API key"}
         />
         <input
           type="text"
@@ -225,6 +228,16 @@ function SortableProviderBlock({ id, idx, providerKey, value, handleValueChange
           placeholder="Model name"
         />
       </div>
+      {provider.baseUrlName && (
+        <input
+          type="text"
+          name={provider.baseUrlName}
+          value={value.baseUrl}
+          onChange={e => handleValueChange(provider.key, "baseUrl", e.target.value)}
+          className="w-full border rounded px-2 py-1"
+          placeholder="Base URL (e.g. http://localhost:11434/v1)"
+        />
+      )}
       {provider.apiDoc && (
         <small className="text-muted-foreground">
           Get your API key from{" "}
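For reference, the immutable-update pattern behind handleProviderValueChange above, extracted as a plain function. This is a sketch: the React wiring is in the diff, and the sample values are invented.

    type ProviderValues = Record<string, { apiKey: string; model: string; baseUrl: string }>

    // Spread the old map, then spread the one provider being edited,
    // overwriting a single field, matching the updater passed to setProviderValues.
    function updateProviderValue(
      prev: ProviderValues,
      providerKey: string,
      field: "apiKey" | "model" | "baseUrl",
      value: string,
    ): ProviderValues {
      return { ...prev, [providerKey]: { ...prev[providerKey], [field]: value } }
    }

    const next = updateProviderValue(
      { openai_compatible: { apiKey: "", model: "llama3.1", baseUrl: "" } },
      "openai_compatible",
      "baseUrl",
      "http://localhost:11434/v1",
    )
    console.log(next.openai_compatible.baseUrl) // "http://localhost:11434/v1"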
+4 -1
@@ -12,7 +12,10 @@ export const settingsFormSchema = z.object({
   google_model_name: z.string().default("gemini-2.5-flash"),
   mistral_api_key: z.string().optional(),
   mistral_model_name: z.string().default("mistral-medium-latest"),
-  llm_providers: z.string().default('openai,google,mistral'),
+  openai_compatible_api_key: z.string().optional(),
+  openai_compatible_model_name: z.string().optional(),
+  openai_compatible_base_url: z.string().optional(),
+  llm_providers: z.string().default('openai,google,mistral,openai_compatible'),
   prompt_analyse_new_file: z.string().optional(),
   is_welcome_message_hidden: z.string().optional(),
 })
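A quick check of how the extended schema behaves, trimmed to just the fields shown above: an empty settings object still resolves to the new default provider order.

    import { z } from "zod"

    // Trimmed copy of the schema fields added/changed above.
    const schema = z.object({
      openai_compatible_api_key: z.string().optional(),
      openai_compatible_model_name: z.string().optional(),
      openai_compatible_base_url: z.string().optional(),
      llm_providers: z.string().default("openai,google,mistral,openai_compatible"),
    })

    console.log(schema.parse({}).llm_providers)
    // -> "openai,google,mistral,openai_compatible"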
+33 -1
@@ -1,4 +1,19 @@
-export const PROVIDERS = [
+export interface ProviderMeta {
+  key: string
+  label: string
+  apiKeyName: string
+  modelName: string
+  defaultModelName: string
+  baseUrlName?: string
+  defaultBaseUrl?: string
+  apiDoc: string
+  apiDocLabel: string
+  placeholder: string
+  help: { url: string; label: string }
+  logo: string
+}
+
+export const PROVIDERS: ProviderMeta[] = [
   {
     key: "openai",
     label: "OpenAI",
@@ -44,4 +59,21 @@ export const PROVIDERS = [
     },
     logo: "/logo/mistral.svg"
   },
+  {
+    key: "openai_compatible",
+    label: "OpenAI Compatible",
+    apiKeyName: "openai_compatible_api_key",
+    modelName: "openai_compatible_model_name",
+    defaultModelName: "",
+    baseUrlName: "openai_compatible_base_url",
+    defaultBaseUrl: "http://localhost:11434/v1",
+    apiDoc: "",
+    apiDocLabel: "",
+    placeholder: "(optional)",
+    help: {
+      url: "https://github.com/ollama/ollama/blob/main/docs/openai.md",
+      label: "Works with Ollama, LM Studio, vLLM, LocalAI"
+    },
+    logo: "/logo/openai.svg"
+  },
 ]
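The optional baseUrlName / defaultBaseUrl fields are what the settings forms key off to decide whether to render a Base URL input (see the form diffs above). A small lookup sketch, reusing the import path shown earlier in this commit:

    import { PROVIDERS } from "@/lib/llm-providers"

    const meta = PROVIDERS.find((p) => p.key === "openai_compatible")
    if (meta?.baseUrlName) {
      // Providers without baseUrlName (openai, google, mistral) skip this input.
      console.log(`Render "${meta.label} Base URL" input, default: ${meta.defaultBaseUrl}`)
    }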
+10 -1
@@ -9,7 +9,7 @@ export type SettingsMap = Record<string, string>
  * Helper to extract LLM provider settings from SettingsMap.
  */
 export function getLLMSettings(settings: SettingsMap) {
-  const priorities = (settings.llm_providers || "openai,google,mistral").split(",").map(p => p.trim()).filter(Boolean)
+  const priorities = (settings.llm_providers || "openai,google,mistral,openai_compatible").split(",").map(p => p.trim()).filter(Boolean)
 
   const providers = priorities.map((provider) => {
     if (provider === "openai") {
@@ -33,6 +33,15 @@ export function getLLMSettings(settings: SettingsMap) {
         model: settings.mistral_model_name || PROVIDERS[2]['defaultModelName'],
       }
     }
+    if (provider === "openai_compatible") {
+      const providerMeta = PROVIDERS.find(p => p.key === "openai_compatible")
+      return {
+        provider: provider as LLMProvider,
+        apiKey: settings.openai_compatible_api_key || "",
+        model: settings.openai_compatible_model_name || "",
+        baseUrl: settings.openai_compatible_base_url || providerMeta?.defaultBaseUrl || "",
+      }
+    }
     return null
   }).filter((provider): provider is NonNullable<typeof provider> => provider !== null)
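Putting it together, a sketch of a local-first fallback configuration as getLLMSettings and requestLLM would see it after this change; the concrete values are examples.

    const settings: Record<string, string> = {
      llm_providers: "openai_compatible,openai", // local model first, cloud fallback
      openai_compatible_model_name: "llama3.1",
      openai_compatible_base_url: "http://localhost:11434/v1",
      openai_api_key: "sk-...", // omit this and requestLLM skips OpenAI
    }

    // getLLMSettings(settings) maps these to ordered LLMConfig entries;
    // requestLLM then walks them in order, skipping openai_compatible when
    // baseUrl is missing and the other providers when apiKey is missing.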