/**
 * OpenAI-compatible chat API (OpenAI, Azure OpenAI, Ollama, LM Studio, vLLM, …).
 *
 * Environment variables:
 *   LANGUAGE_COURSE_LLM_API_KEY or OPENAI_API_KEY
 *   LANGUAGE_COURSE_LLM_BASE_URL (optional, e.g. http://127.0.0.1:11434/v1 for Ollama)
 *   LANGUAGE_COURSE_LLM_MODEL (optional, default: gpt-4o-mini)
 */
import OpenAI from 'openai';

/**
 * Reads the LLM configuration from the environment.
 *
 * @returns {{ apiKey: string; baseURL: string | undefined; model: string }}
 *   apiKey is '' when no key is set; baseURL is undefined when unset/empty so
 *   the SDK falls back to its default endpoint.
 */
export function getLlmEnv() {
  const { env } = process;
  return {
    // Project-specific variable wins; the standard OpenAI one is the fallback.
    apiKey: env.LANGUAGE_COURSE_LLM_API_KEY || env.OPENAI_API_KEY || '',
    baseURL: env.LANGUAGE_COURSE_LLM_BASE_URL || undefined,
    model: env.LANGUAGE_COURSE_LLM_MODEL || 'gpt-4o-mini',
  };
}
/**
 * Whether an API key is available, i.e. chat requests can be attempted.
 *
 * @returns {boolean}
 */
export function isLlmConfigured() {
  const { apiKey } = getLlmEnv();
  return apiKey.length > 0;
}
/**
 * Builds an OpenAI SDK client from the environment configuration.
 *
 * @returns {OpenAI | null} null when no API key is configured.
 */
export function getOpenAiClient() {
  const env = getLlmEnv();
  // getLlmEnv() yields '' when no key is set — treat that as "not configured".
  if (env.apiKey === '') {
    return null;
  }
  return new OpenAI({ apiKey: env.apiKey, baseURL: env.baseURL });
}
/**
 * Sends a chat completion request and returns the assistant's reply as
 * trimmed plain text.
 *
 * @param {import('openai').ChatCompletionMessageParam[]} messages
 * @param {{ model?: string; temperature?: number; maxTokens?: number }} [opts]
 * @returns {Promise<string>} the non-empty model reply
 * @throws {Error} when no LLM is configured or the model returns no text
 */
export async function chatComplete(messages, opts = {}) {
  const client = getOpenAiClient();
  if (client === null) {
    throw new Error(
      'Kein LLM konfiguriert: LANGUAGE_COURSE_LLM_API_KEY oder OPENAI_API_KEY setzen (siehe README).'
    );
  }

  // Destructuring defaults apply only when the option is undefined.
  const { model: requestedModel, temperature = 0.6, maxTokens = 2048 } = opts;

  const response = await client.chat.completions.create({
    model: requestedModel || getLlmEnv().model,
    messages,
    temperature,
    max_tokens: maxTokens,
  });

  const reply = response.choices[0]?.message?.content?.trim() || '';
  if (reply === '') {
    throw new Error('Leere Antwort vom Modell.');
  }
  return reply;
}