mirror of
https://github.com/decolua/9router.git
synced 2026-05-08 12:01:28 +00:00
- Speech-to-Text: full pipeline with sttCore handler, /v1/audio/transcriptions endpoint, sttConfig for OpenAI, Gemini, Groq, Deepgram, AssemblyAI, HuggingFace, NVIDIA Parakeet; new 9router-stt skill
- Gemini TTS: add gemini provider with 30 prebuilt voices and TTS_PROVIDER_CONFIG
- Usage: implement GLM (intl/cn) and MiniMax (intl/cn) quota fetchers; refactor Gemini CLI usage to use retrieveUserQuota with per-model buckets
- Disabled models: lowdb-backed disabledModelsDb + /api/models/disabled route
- Header search: reusable Zustand store (headerSearchStore) wired into Header
- CLI tools: add Claude Cowork tool card and cowork-settings API
- Providers: introduce mediaPriority sorting in getProvidersByKind, add Kimi K2.6, reorder hermes, drop qwen STT kind
- UI: expand media-providers/[kind]/[id] page (+314), enhance OAuthModal, ModelSelectModal, ProviderTopology, ProxyPools, ProviderLimits
- Assets: refresh provider PNGs (alicode, byteplus, cloudflare-ai, nvidia, ollama, vertex, volcengine-ark) and add aws-polly, fal-ai, jina-ai, recraft, runwayml, stability-ai, topaz, black-forest-labs
40 lines
1.4 KiB
JavaScript
// Some thinking-mode providers (DeepSeek, Kimi, ...) require reasoning_content
// to be echoed back on assistant messages. Clients in OpenAI format don't send it,
// so we inject a non-empty placeholder to satisfy upstream validation.

// A single space: non-empty (so upstream validation accepts it) but effectively
// content-free. Must stay non-empty — shouldInject treats only non-empty strings
// as "already present", so "" would be re-injected on every pass.
const PLACEHOLDER = " ";
// Provider-level rules: keyed by executor.provider. A provider rule takes
// precedence over any model-level rule (see injectReasoningContent).
// scope "all" injects the placeholder into every assistant message that lacks
// a non-empty reasoning_content.
// Frozen so the shared rule table cannot be mutated at runtime.
const PROVIDER_RULES = Object.freeze({
  deepseek: Object.freeze({ scope: "all" }),
});
// Model-level rules: matched by predicate against the model id; first match
// wins (Array.prototype.find order). The optional-chained `startsWith` keeps
// the predicates safe for undefined/non-string model ids.
// scope "toolCalls" injects only on assistant messages carrying tool_calls;
// scope "all" injects on any assistant message lacking reasoning_content
// (see shouldInject).
// Frozen so the shared rule list cannot be mutated at runtime.
const MODEL_RULES = Object.freeze([
  // Kimi models need the placeholder only alongside tool calls.
  { match: m => m?.startsWith?.("kimi-"), scope: "toolCalls" },
  // DeepSeek-branded models mirror the deepseek provider rule.
  { match: m => m?.startsWith?.("deepseek-"), scope: "all" },
]);
/**
 * Decide whether a message should receive the placeholder reasoning_content.
 *
 * @param {object|undefined} message - A chat message in OpenAI format.
 * @param {string} scope - "toolCalls" limits injection to messages with
 *   tool_calls; any other value (e.g. "all") injects unconditionally.
 * @returns {boolean} true when the placeholder should be injected.
 */
function shouldInject(message, scope) {
  // Only assistant messages are ever patched.
  if (message?.role !== "assistant") return false;

  // Respect reasoning content the client already supplied.
  const existing = message.reasoning_content;
  const alreadyPresent = typeof existing === "string" && existing.length > 0;
  if (alreadyPresent) return false;

  // The "toolCalls" scope restricts injection to tool-call messages.
  if (scope === "toolCalls") {
    const calls = message.tool_calls;
    return Array.isArray(calls) && calls.length > 0;
  }

  // Every other scope ("all") injects whenever reasoning_content is missing.
  return true;
}
/**
 * Apply one injection rule to a request body.
 *
 * Returns a shallow copy of `body` whose matching assistant messages carry the
 * placeholder reasoning_content; neither `body` nor its messages are mutated,
 * and messages that don't match are reused as-is.
 *
 * @param {object|undefined} body - OpenAI-format request body.
 * @param {{scope: string}|undefined} rule - Rule to apply, or undefined.
 * @returns {object|undefined} The patched body, or the original when there is
 *   no rule or no messages array.
 */
function applyRule(body, rule) {
  // Nothing to do without a rule or a messages list.
  if (!rule || !body?.messages) return body;

  const patched = [];
  for (const message of body.messages) {
    patched.push(
      shouldInject(message, rule.scope)
        ? { ...message, reasoning_content: PLACEHOLDER }
        : message
    );
  }
  return { ...body, messages: patched };
}
/**
 * Entry point: inject a placeholder reasoning_content into assistant messages
 * when the target provider or model requires it.
 *
 * Provider-level rules take precedence over model-level rules.
 *
 * @param {{provider?: string, model?: string, body?: object}} ctx
 * @returns {object|undefined} The (possibly patched) request body.
 */
export function injectReasoningContent({ provider, model, body }) {
  // Provider rule wins; otherwise fall back to the first matching model rule.
  const rule =
    PROVIDER_RULES[provider] ?? MODEL_RULES.find((r) => r.match(model));
  return applyRule(body, rule);
}