feat: add STT support, Gemini TTS, and expand usage tracking

- Speech-to-Text: full pipeline with sttCore handler, /v1/audio/transcriptions
  endpoint (example call sketched below), sttConfig for OpenAI, Gemini, Groq,
  Deepgram, AssemblyAI, HuggingFace, NVIDIA Parakeet; new 9router-stt skill
- Gemini TTS: add gemini provider with 30 prebuilt voices and TTS_PROVIDER_CONFIG
- Usage: implement GLM (intl/cn) and MiniMax (intl/cn) quota fetchers; refactor
  Gemini CLI usage to use retrieveUserQuota with per-model buckets
- Disabled models: lowdb-backed disabledModelsDb + /api/models/disabled route
- Header search: reusable Zustand store (headerSearchStore) wired into Header
- CLI tools: add Claude Cowork tool card and cowork-settings API
- Providers: introduce mediaPriority sorting in getProvidersByKind, add
  Kimi K2.6, reorder hermes, drop qwen STT kind
- UI: expand media-providers/[kind]/[id] page (+314), enhance OAuthModal,
  ModelSelectModal, ProviderTopology, ProxyPools, ProviderLimits
- Assets: refresh provider PNGs (alicode, byteplus, cloudflare-ai, nvidia,
  ollama, vertex, volcengine-ark) and add aws-polly, fal-ai, jina-ai, recraft,
  runwayml, stability-ai, topaz, black-forest-labs
decolua
2026-05-05 10:32:59 +07:00
parent bfb7d42164
commit d4bc42e1f5
67 changed files with 2930 additions and 234 deletions
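For reference, a minimal client call against the new transcription route — a sketch that assumes a local deployment and OpenAI-style multipart fields (base URL, port, and auth scheme are placeholders, not taken from this commit):

import { readFile } from "node:fs/promises";

// Sketch: POST an audio clip to the new OpenAI-compatible STT endpoint.
const audio = await readFile("clip.wav");
const fd = new FormData();
fd.append("file", new Blob([audio], { type: "audio/wav" }), "clip.wav");
fd.append("model", "whisper-large-v3");
fd.append("language", "en");
const res = await fetch("http://localhost:3000/v1/audio/transcriptions", {
  method: "POST",
  headers: { Authorization: "Bearer <router-key>" }, // hypothetical key
  body: fd,
});
const { text } = await res.json();
console.log(text);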

View File

@@ -156,6 +156,7 @@ export const PROVIDER_MODELS = {
{ id: "gpt-5.3-codex", name: "GPT 5.3 Codex" },
],
kmc: [ // Kimi Coding
{ id: "kimi-k2.6", name: "Kimi K2.6" },
{ id: "kimi-k2.5", name: "Kimi K2.5" },
{ id: "kimi-k2.5-thinking", name: "Kimi K2.5 Thinking" },
{ id: "kimi-latest", name: "Kimi Latest" },
@@ -233,6 +234,10 @@ export const PROVIDER_MODELS = {
{ id: "tts-1", name: "TTS-1", type: "tts" },
{ id: "tts-1-hd", name: "TTS-1 HD", type: "tts" },
{ id: "gpt-4o-mini-tts", name: "GPT-4o Mini TTS", type: "tts" },
// STT models
{ id: "whisper-1", name: "Whisper 1", type: "stt", params: ["language", "response_format", "temperature", "prompt"] },
{ id: "gpt-4o-transcribe", name: "GPT-4o Transcribe", type: "stt", params: ["language", "response_format", "temperature", "prompt"] },
{ id: "gpt-4o-mini-transcribe", name: "GPT-4o Mini Transcribe", type: "stt", params: ["language", "response_format", "temperature", "prompt"] },
// Image models
{ id: "gpt-image-1", name: "GPT Image 1", type: "image", params: ["n", "size", "quality", "response_format"] },
{ id: "dall-e-3", name: "DALL-E 3", type: "image", params: ["size", "quality", "style", "response_format"] },
@@ -267,6 +272,11 @@ export const PROVIDER_MODELS = {
{ id: "gemini-3.1-flash-image-preview", name: "Gemini 3.1 Flash Image (Nano Banana 2)", type: "image", params: [] },
{ id: "gemini-3-pro-image-preview", name: "Gemini 3 Pro Image (Nano Banana Pro)", type: "image", params: [] },
{ id: "gemini-2.5-flash-image", name: "Gemini 2.5 Flash Image (Nano Banana)", type: "image", params: [] },
// STT models (multimodal generateContent)
{ id: "gemini-2.5-pro", name: "Gemini 2.5 Pro (Best)", type: "stt", params: ["language", "prompt"] },
{ id: "gemini-2.5-flash", name: "Gemini 2.5 Flash", type: "stt", params: ["language", "prompt"] },
{ id: "gemini-2.5-flash-lite", name: "Gemini 2.5 Flash Lite (Cheapest)", type: "stt", params: ["language", "prompt"] },
{ id: "gemini-2.0-flash", name: "Gemini 2.0 Flash", type: "stt", params: ["language", "prompt"] },
],
openrouter: [
// Embedding models
@@ -301,6 +311,7 @@ export const PROVIDER_MODELS = {
{ id: "glm-4.5-air", name: "GLM-4.5-Air" },
],
kimi: [
{ id: "kimi-k2.6", name: "Kimi K2.6" },
{ id: "kimi-k2.5", name: "Kimi K2.5" },
{ id: "kimi-k2.5-thinking", name: "Kimi K2.5 Thinking" },
{ id: "kimi-latest", name: "Kimi Latest" },
@@ -402,6 +413,10 @@ export const PROVIDER_MODELS = {
{ id: "meta-llama/llama-4-maverick-17b-128e-instruct", name: "Llama 4 Maverick" },
{ id: "qwen/qwen3-32b", name: "Qwen3 32B" },
{ id: "openai/gpt-oss-120b", name: "GPT-OSS 120B" },
// STT models
{ id: "whisper-large-v3", name: "Whisper Large v3", type: "stt", params: ["language", "response_format", "temperature", "prompt"] },
{ id: "whisper-large-v3-turbo", name: "Whisper Large v3 Turbo", type: "stt", params: ["language", "response_format", "temperature", "prompt"] },
{ id: "distil-whisper-large-v3-en", name: "Distil Whisper Large v3 EN", type: "stt", params: ["language", "response_format", "temperature", "prompt"] },
],
xai: [
{ id: "grok-4", name: "Grok 4" },
@@ -450,6 +465,8 @@ export const PROVIDER_MODELS = {
{ id: "minimaxai/minimax-m2.7", name: "Minimax M2.7" },
{ id: "z-ai/glm4.7", name: "GLM 4.7" },
{ id: "nvidia/nv-embedqa-e5-v5", name: "NV EmbedQA E5 v5", type: "embedding" },
// STT models
{ id: "nvidia/parakeet-ctc-1.1b-asr", name: "Parakeet CTC 1.1B", type: "stt", params: ["language"] },
],
nebius: [
{ id: "meta-llama/Llama-3.3-70B-Instruct", name: "Llama 3.3 70B Instruct" },
@@ -555,6 +572,18 @@ export const PROVIDER_MODELS = {
huggingface: [
{ id: "black-forest-labs/FLUX.1-schnell", name: "FLUX.1 Schnell", type: "image", params: [] },
{ id: "stabilityai/stable-diffusion-xl-base-1.0", name: "SDXL Base 1.0", type: "image", params: [] },
// STT models
{ id: "openai/whisper-large-v3", name: "Whisper Large v3 (HF)", type: "stt", params: ["language"] },
{ id: "openai/whisper-small", name: "Whisper Small (HF)", type: "stt", params: ["language"] },
],
deepgram: [
{ id: "nova-3", name: "Nova 3", type: "stt", params: ["language"] },
{ id: "nova-2", name: "Nova 2", type: "stt", params: ["language"] },
{ id: "whisper-large", name: "Whisper Large", type: "stt", params: ["language"] },
],
assemblyai: [
{ id: "universal-3-pro", name: "Universal 3 Pro", type: "stt", params: ["language"] },
{ id: "universal-2", name: "Universal 2", type: "stt", params: ["language"] },
],
"fal-ai": [
{ id: "fal-ai/flux/schnell", name: "FLUX Schnell", type: "image", params: ["n", "size"] },

View File

@@ -24,6 +24,15 @@ const VOICES_STANDARD = v("alloy", "ash", "coral", "echo", "fable", "nova", "ony
// 13 voices for gpt-4o-mini-tts
const VOICES_FULL = v("alloy", "ash", "ballad", "cedar", "coral", "echo", "fable", "marin", "nova", "onyx", "sage", "shimmer", "verse");
// Gemini prebuilt voices (30 voices, multi-language auto-detect)
const GEMINI_VOICES = [
"Zephyr", "Puck", "Charon", "Kore", "Fenrir", "Leda", "Orus", "Aoede",
"Callirrhoe", "Autonoe", "Enceladus", "Iapetus", "Umbriel", "Algieba",
"Despina", "Erinome", "Algenib", "Rasalgethi", "Laomedeia", "Achernar",
"Alnilam", "Schedar", "Gacrux", "Pulcherrima", "Achird", "Zubenelgenubi",
"Vindemiatrix", "Sadachbia", "Sadaltager", "Sulafat",
].map((id) => ({ id, name: id, type: "tts" }));
// ── TTS Config (config-driven, single source of truth) ─────────────────────
export const TTS_MODELS_CONFIG = {
openai: {
@@ -85,6 +94,17 @@ export const TTS_MODELS_CONFIG = {
"google-tts": {
defaults: GOOGLE_TTS_LANGUAGES,
},
gemini: {
models: [
{ id: "gemini-2.5-flash-preview-tts", name: "Gemini 2.5 Flash TTS", type: "tts" },
{ id: "gemini-2.5-pro-preview-tts", name: "Gemini 2.5 Pro TTS", type: "tts" },
],
voices: {
"gemini-2.5-flash-preview-tts": GEMINI_VOICES,
"gemini-2.5-pro-preview-tts": GEMINI_VOICES,
},
allVoices: GEMINI_VOICES,
},
};
// ── Helper: get voices for a specific model ────────────────────────────────
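The helper body falls outside this hunk; given the per-model voices map with an allVoices fallback, a plausible shape (name and signature are assumptions):

// Sketch only — the actual helper is not shown in this diff.
export function getVoicesForModel(provider, modelId) {
  const cfg = TTS_MODELS_CONFIG[provider];
  return cfg?.voices?.[modelId] ?? cfg?.allVoices ?? [];
}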

View File

@@ -0,0 +1,194 @@
import { Buffer } from "node:buffer";
import { createErrorResult } from "../utils/error.js";
import { HTTP_STATUS } from "../config/runtimeConfig.js";
import { AI_PROVIDERS } from "../../src/shared/constants/providers.js";
// Build auth headers from sttConfig + token
function buildAuthHeaders(cfg, token) {
if (!token) return {};
switch (cfg.authHeader) {
case "bearer": return { "Authorization": `Bearer ${token}` };
case "token": return { "Authorization": `Token ${token}` };
case "x-api-key": return { "x-api-key": token };
case "key": return { "Authorization": `Key ${token}` };
default: return { "Authorization": `Bearer ${token}` };
}
}
// Map browser file MIME / ext → audio MIME for binary formats (deepgram/HF)
function resolveAudioContentType(file) {
const t = (file.type || "").toLowerCase();
if (t.startsWith("audio/")) return t;
const name = typeof file.name === "string" ? file.name.toLowerCase() : "";
const ext = name.includes(".") ? name.split(".").pop() : "";
const map = { mp3: "audio/mpeg", mp4: "audio/mp4", m4a: "audio/mp4", wav: "audio/wav", ogg: "audio/ogg", flac: "audio/flac", webm: "audio/webm", aac: "audio/aac", opus: "audio/opus" };
return map[ext] || "application/octet-stream";
}
async function upstreamError(res) {
let txt = "";
try { txt = await res.text(); } catch {}
let msg = txt || `Upstream error (${res.status})`;
try { const j = JSON.parse(txt); msg = j?.error?.message || j?.error || j?.message || msg; } catch {}
return createErrorResult(res.status, typeof msg === "string" ? msg : JSON.stringify(msg));
}
// Deepgram: raw binary POST + model query param
async function transcribeDeepgram(cfg, file, model, token, formData) {
const url = new URL(cfg.baseUrl);
url.searchParams.set("model", model);
url.searchParams.set("smart_format", "true");
url.searchParams.set("punctuate", "true");
const lang = formData.get("language");
if (typeof lang === "string" && lang.trim()) url.searchParams.set("language", lang.trim());
else url.searchParams.set("detect_language", "true");
const buf = await file.arrayBuffer();
const res = await fetch(url, {
method: "POST",
headers: { ...buildAuthHeaders(cfg, token), "Content-Type": resolveAudioContentType(file) },
body: buf,
});
if (!res.ok) return upstreamError(res);
const data = await res.json();
const text = data.results?.channels?.[0]?.alternatives?.[0]?.transcript ?? "";
return jsonResponse({ text });
}
// AssemblyAI: upload → submit → poll (max 120s)
async function transcribeAssemblyAI(cfg, file, model, token) {
const auth = buildAuthHeaders(cfg, token);
const buf = await file.arrayBuffer();
const up = await fetch("https://api.assemblyai.com/v2/upload", {
method: "POST", headers: { ...auth, "Content-Type": "application/octet-stream" }, body: buf,
});
if (!up.ok) return upstreamError(up);
const { upload_url } = await up.json();
const sub = await fetch(cfg.baseUrl, {
method: "POST",
headers: { ...auth, "Content-Type": "application/json" },
body: JSON.stringify({ audio_url: upload_url, speech_models: [model], language_detection: true }),
});
if (!sub.ok) return upstreamError(sub);
const { id } = await sub.json();
const start = Date.now();
while (Date.now() - start < 120_000) {
await new Promise((r) => setTimeout(r, 2000));
const poll = await fetch(`${cfg.baseUrl}/${id}`, { headers: auth });
if (!poll.ok) continue;
const r = await poll.json();
if (r.status === "completed") return jsonResponse({ text: r.text || "" });
if (r.status === "error") return createErrorResult(500, r.error || "AssemblyAI failed");
}
return createErrorResult(504, "AssemblyAI timeout after 120s");
}
// Nvidia NIM: multipart, normalize response
async function transcribeNvidia(cfg, file, model, token) {
const fd = new FormData();
fd.append("file", file, file.name || "audio.wav");
fd.append("model", model);
const res = await fetch(cfg.baseUrl, { method: "POST", headers: buildAuthHeaders(cfg, token), body: fd });
if (!res.ok) return upstreamError(res);
const data = await res.json();
return jsonResponse({ text: data.text || data.transcript || "" });
}
// Gemini: generateContent with inline_data audio + transcription prompt
async function transcribeGemini(cfg, file, model, token, formData) {
const buf = await file.arrayBuffer();
const b64 = Buffer.from(buf).toString("base64");
const mime = resolveAudioContentType(file);
const lang = formData.get("language");
const userPrompt = formData.get("prompt");
let promptText = userPrompt && typeof userPrompt === "string" && userPrompt.trim()
? userPrompt.trim()
: "Generate a transcript of the speech. Return only the transcribed text, no commentary.";
if (typeof lang === "string" && lang.trim()) promptText += ` Language: ${lang.trim()}.`;
const url = `${cfg.baseUrl}/${model}:generateContent?key=${token}`;
const res = await fetch(url, {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({
contents: [{ parts: [{ text: promptText }, { inline_data: { mime_type: mime, data: b64 } }] }],
}),
});
if (!res.ok) return upstreamError(res);
const data = await res.json();
const text = data?.candidates?.[0]?.content?.parts?.map((p) => p.text).filter(Boolean).join("") || "";
return jsonResponse({ text });
}
// HuggingFace: POST raw binary to {baseUrl}/{model_id}
async function transcribeHuggingFace(cfg, file, model, token) {
if (model.includes("..") || model.includes("//")) return createErrorResult(400, "Invalid model ID");
const url = `${cfg.baseUrl.replace(/\/+$/, "")}/${model}`;
const buf = await file.arrayBuffer();
const res = await fetch(url, {
method: "POST",
headers: { ...buildAuthHeaders(cfg, token), "Content-Type": resolveAudioContentType(file) },
body: buf,
});
if (!res.ok) return upstreamError(res);
const data = await res.json();
return jsonResponse({ text: data.text || "" });
}
// Default: OpenAI/Groq/Whisper-compatible multipart
async function transcribeOpenAICompatible(cfg, file, model, token, formData) {
const fd = new FormData();
fd.append("file", file, file.name || "audio.wav");
fd.append("model", model);
for (const k of ["language", "prompt", "response_format", "temperature"]) {
const v = formData.get(k);
if (v !== null && v !== undefined && v !== "") fd.append(k, v);
}
const res = await fetch(cfg.baseUrl, { method: "POST", headers: buildAuthHeaders(cfg, token), body: fd });
if (!res.ok) return upstreamError(res);
const ct = res.headers.get("content-type") || "application/json";
const txt = await res.text();
return { success: true, response: new Response(txt, { status: 200, headers: { "Content-Type": ct, "Access-Control-Allow-Origin": "*" } }) };
}
function jsonResponse(obj) {
return {
success: true,
response: new Response(JSON.stringify(obj), {
status: 200,
headers: { "Content-Type": "application/json", "Access-Control-Allow-Origin": "*" },
}),
};
}
/**
* STT core handler — dispatch by sttConfig.format.
* @returns {Promise<{success, response, status?, error?}>}
*/
export async function handleSttCore({ provider, model, formData, credentials }) {
const file = formData.get("file");
if (!file) return createErrorResult(HTTP_STATUS.BAD_REQUEST, "Missing required field: file");
const cfg = AI_PROVIDERS[provider]?.sttConfig;
if (!cfg) return createErrorResult(HTTP_STATUS.BAD_REQUEST, `Provider '${provider}' does not support STT`);
const token = cfg.authType === "none" ? null : (credentials?.apiKey || credentials?.accessToken);
if (cfg.authType !== "none" && !token) {
return createErrorResult(HTTP_STATUS.UNAUTHORIZED, `No credentials for STT provider: ${provider}`);
}
try {
switch (cfg.format) {
case "deepgram": return await transcribeDeepgram(cfg, file, model, token, formData);
case "assemblyai": return await transcribeAssemblyAI(cfg, file, model, token);
case "nvidia-asr": return await transcribeNvidia(cfg, file, model, token);
case "huggingface-asr": return await transcribeHuggingFace(cfg, file, model, token);
case "gemini-stt": return await transcribeGemini(cfg, file, model, token, formData);
default: return await transcribeOpenAICompatible(cfg, file, model, token, formData);
}
} catch (err) {
return createErrorResult(HTTP_STATUS.BAD_GATEWAY, err.message || "STT request failed");
}
}
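A route handler would bridge an incoming multipart request to this dispatcher roughly as follows (request parsing and credential lookup are sketched, not taken from this commit):

// Sketch: wiring /v1/audio/transcriptions to handleSttCore.
const formData = await request.formData();
const result = await handleSttCore({
  provider: "deepgram",                                 // resolved by routing; hardcoded here
  model: String(formData.get("model") || "nova-3"),
  formData,
  credentials: { apiKey: process.env.DEEPGRAM_API_KEY },
});
if (result.success) return result.response;             // Response carrying { text }
return new Response(result.error, { status: result.status }); // assumes createErrorResult's shape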

View File

@@ -48,16 +48,16 @@ function createTtsResponse(base64Audio, format, responseFormat) {
*
* @returns {Promise<{success, response, status?, error?}>}
*/
-export async function handleTtsCore({ provider, model, input, credentials, responseFormat = "mp3" }) {
+export async function handleTtsCore({ provider, model, input, credentials, responseFormat = "mp3", language }) {
if (!input?.trim()) {
return createErrorResult(HTTP_STATUS.BAD_REQUEST, "Missing required field: input");
}
try {
-// Special-case adapters (google-tts, edge-tts, local-device, elevenlabs, openai, openrouter)
+// Special-case adapters (google-tts, edge-tts, local-device, elevenlabs, openai, openrouter, gemini)
const adapter = getTtsAdapter(provider);
if (adapter) {
-const result = await adapter.synthesize(input.trim(), model, credentials, responseFormat);
+const result = await adapter.synthesize(input.trim(), model, credentials, responseFormat, { language });
// Adapter may return a full {success, response} (legacy) or {base64, format}
if (result.success !== undefined) return result;
return createTtsResponse(result.base64, result.format, responseFormat);

View File

@@ -0,0 +1,117 @@
// Gemini TTS — generateContent with AUDIO modality returns PCM L16, wrap as WAV
import { Buffer } from "node:buffer";
const DEFAULT_MODEL = "gemini-2.5-flash-preview-tts";
const DEFAULT_VOICE = "Kore";
const KNOWN_MODELS = ["gemini-2.5-flash-preview-tts", "gemini-2.5-pro-preview-tts"];
// Parse "model/voice" — if input doesn't match a known TTS model, treat it as voice with default model
function parseGeminiModelVoice(input) {
if (!input) return { modelId: DEFAULT_MODEL, voiceId: DEFAULT_VOICE };
for (const id of KNOWN_MODELS) {
if (input === id) return { modelId: id, voiceId: DEFAULT_VOICE };
if (input.startsWith(`${id}/`)) return { modelId: id, voiceId: input.slice(id.length + 1) };
}
return { modelId: DEFAULT_MODEL, voiceId: input };
}
// Gemini returns PCM 16-bit signed mono @ 24kHz
const SAMPLE_RATE = 24000;
const CHANNELS = 1;
const BITS_PER_SAMPLE = 16;
// Build WAV header for raw PCM payload
function pcmToWav(pcmBuffer) {
const dataSize = pcmBuffer.length;
const byteRate = SAMPLE_RATE * CHANNELS * BITS_PER_SAMPLE / 8;
const blockAlign = CHANNELS * BITS_PER_SAMPLE / 8;
const header = Buffer.alloc(44);
header.write("RIFF", 0);
header.writeUInt32LE(36 + dataSize, 4);
header.write("WAVE", 8);
header.write("fmt ", 12);
header.writeUInt32LE(16, 16);
header.writeUInt16LE(1, 20);
header.writeUInt16LE(CHANNELS, 22);
header.writeUInt32LE(SAMPLE_RATE, 24);
header.writeUInt32LE(byteRate, 28);
header.writeUInt16LE(blockAlign, 32);
header.writeUInt16LE(BITS_PER_SAMPLE, 34);
header.write("data", 36);
header.writeUInt32LE(dataSize, 40);
return Buffer.concat([header, pcmBuffer]);
}
// Build TTS prompt: add "Say [in {language}]:" prefix to force TTS mode
function buildPrompt(text, language) {
if (/:\s/.test(text)) return text; // user already provided style instruction
return language ? `Say in ${language}: ${text}` : `Say: ${text}`;
}
export default {
async synthesize(text, model, credentials, _responseFormat, opts = {}) {
if (!credentials?.apiKey) throw new Error("No Gemini API key configured");
const { modelId, voiceId } = parseGeminiModelVoice(model);
const url = `https://generativelanguage.googleapis.com/v1beta/models/${modelId}:generateContent?key=${credentials.apiKey}`;
const res = await fetch(url, {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({
contents: [{ parts: [{ text: buildPrompt(text, opts.language) }] }],
generationConfig: {
responseModalities: ["AUDIO"],
speechConfig: { voiceConfig: { prebuiltVoiceConfig: { voiceName: voiceId } } },
},
}),
});
if (!res.ok) {
const err = await res.json().catch(() => ({}));
throw new Error(err?.error?.message || `Gemini TTS failed: ${res.status}`);
}
const data = await res.json();
const b64 = data?.candidates?.[0]?.content?.parts?.find((p) => p.inlineData?.data)?.inlineData?.data;
if (!b64) {
const reason = data?.candidates?.[0]?.finishReason || data?.promptFeedback?.blockReason || "unknown";
throw new Error(`Gemini TTS returned no audio (finishReason: ${reason}, voice: ${voiceId}, model: ${modelId})`);
}
const wav = pcmToWav(Buffer.from(b64, "base64"));
return { base64: wav.toString("base64"), format: "wav" };
},
};
// Voice fetcher — return prebuilt voices (Gemini has no list API)
const PREBUILT_VOICES = [
{ id: "Zephyr", lang: "en", gender: "Female" },
{ id: "Puck", lang: "en", gender: "Male" },
{ id: "Charon", lang: "en", gender: "Male" },
{ id: "Kore", lang: "en", gender: "Female" },
{ id: "Fenrir", lang: "en", gender: "Male" },
{ id: "Leda", lang: "en", gender: "Female" },
{ id: "Orus", lang: "en", gender: "Male" },
{ id: "Aoede", lang: "en", gender: "Female" },
{ id: "Callirrhoe", lang: "en", gender: "Female" },
{ id: "Autonoe", lang: "en", gender: "Female" },
{ id: "Enceladus", lang: "en", gender: "Male" },
{ id: "Iapetus", lang: "en", gender: "Male" },
{ id: "Umbriel", lang: "en", gender: "Male" },
{ id: "Algieba", lang: "en", gender: "Male" },
{ id: "Despina", lang: "en", gender: "Female" },
{ id: "Erinome", lang: "en", gender: "Female" },
{ id: "Algenib", lang: "en", gender: "Male" },
{ id: "Rasalgethi", lang: "en", gender: "Male" },
{ id: "Laomedeia", lang: "en", gender: "Female" },
{ id: "Achernar", lang: "en", gender: "Female" },
{ id: "Alnilam", lang: "en", gender: "Male" },
{ id: "Schedar", lang: "en", gender: "Male" },
{ id: "Gacrux", lang: "en", gender: "Female" },
{ id: "Pulcherrima", lang: "en", gender: "Female" },
{ id: "Achird", lang: "en", gender: "Male" },
{ id: "Zubenelgenubi", lang: "en", gender: "Male" },
{ id: "Vindemiatrix", lang: "en", gender: "Female" },
{ id: "Sadachbia", lang: "en", gender: "Male" },
{ id: "Sadaltager", lang: "en", gender: "Male" },
{ id: "Sulafat", lang: "en", gender: "Female" },
];
export async function fetchGeminiVoices() {
return PREBUILT_VOICES.map((v) => ({ voice_id: v.id, name: v.id, labels: { language: v.lang, gender: v.gender } }));
}
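Because synthesize resolves to { base64, format: "wav" }, a caller can persist the clip directly — a minimal sketch (key source and file name are placeholders):

import { writeFile } from "node:fs/promises";
import gemini from "./gemini.js";

// Sketch: synthesize with an explicit model/voice pair and write the WAV out.
const { base64, format } = await gemini.synthesize(
  "Hello from the router",
  "gemini-2.5-flash-preview-tts/Kore",   // "model/voice" — a bare voice name also works
  { apiKey: process.env.GEMINI_API_KEY },
  "mp3",                                 // responseFormat is ignored; PCM is wrapped as WAV
  { language: "English" }
);
await writeFile(`speech.${format}`, Buffer.from(base64, "base64"));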

View File

@@ -5,6 +5,7 @@ import localDevice, { fetchLocalDeviceVoices } from "./localDevice.js";
import elevenlabs, { fetchElevenLabsVoices } from "./elevenlabs.js";
import openai from "./openai.js";
import openrouter from "./openrouter.js";
import gemini, { fetchGeminiVoices } from "./gemini.js";
import { FORMAT_HANDLERS } from "./genericFormats.js";
import { parseModelVoice } from "./_base.js";
@@ -16,6 +17,7 @@ const SPECIAL_ADAPTERS = {
elevenlabs,
openai,
openrouter,
gemini,
};
export function getTtsAdapter(provider) {
@@ -41,7 +43,8 @@ export const VOICE_FETCHERS = {
"edge-tts": fetchEdgeTtsVoices,
"local-device": fetchLocalDeviceVoices,
elevenlabs: fetchElevenLabsVoices,
+gemini: fetchGeminiVoices,
};
// Re-export for backward compat
-export { fetchEdgeTtsVoices, fetchLocalDeviceVoices, fetchElevenLabsVoices };
+export { fetchEdgeTtsVoices, fetchLocalDeviceVoices, fetchElevenLabsVoices, fetchGeminiVoices };

View File

@@ -11,6 +11,24 @@ const GITHUB_CONFIG = {
userAgent: "GitHubCopilotChat/0.26.7",
};
// GLM quota endpoints (region-aware)
const GLM_QUOTA_URLS = {
international: "https://api.z.ai/api/monitor/usage/quota/limit",
china: "https://open.bigmodel.cn/api/monitor/usage/quota/limit",
};
// MiniMax usage endpoints (try in order, fallback on transient errors)
const MINIMAX_USAGE_URLS = {
minimax: [
"https://www.minimax.io/v1/token_plan/remains",
"https://api.minimax.io/v1/api/openplatform/coding_plan/remains",
],
"minimax-cn": [
"https://www.minimaxi.com/v1/api/openplatform/coding_plan/remains",
"https://api.minimaxi.com/v1/api/openplatform/coding_plan/remains",
],
};
// Antigravity API config (from Quotio)
const ANTIGRAVITY_CONFIG = {
quotaApiUrl: "https://cloudcode-pa.googleapis.com/v1internal:fetchAvailableModels",
@@ -40,13 +58,13 @@ const CLAUDE_CONFIG = {
* @returns {Object} Usage data with quotas
*/
export async function getUsageForProvider(connection, proxyOptions = null) {
-const { provider, accessToken, providerSpecificData } = connection;
+const { provider, accessToken, apiKey, providerSpecificData } = connection;
switch (provider) {
case "github":
return await getGitHubUsage(accessToken, providerSpecificData, proxyOptions);
case "gemini-cli":
-return await getGeminiUsage(accessToken, proxyOptions);
+return await getGeminiUsage(accessToken, providerSpecificData, proxyOptions);
case "antigravity":
return await getAntigravityUsage(accessToken, providerSpecificData, proxyOptions);
case "claude":
@@ -61,6 +79,12 @@ export async function getUsageForProvider(connection, proxyOptions = null) {
return await getIflowUsage(accessToken);
case "ollama":
return await getOllamaUsage(accessToken);
case "glm":
case "glm-cn":
return await getGlmUsage(apiKey, provider, proxyOptions);
case "minimax":
case "minimax-cn":
return await getMiniMaxUsage(apiKey, provider, proxyOptions);
default:
return { message: `Usage API not implemented for ${provider}` };
}
@@ -188,31 +212,115 @@ function formatGitHubQuotaSnapshot(quota) {
}
/**
-* Gemini CLI Usage (Google Cloud)
+* Gemini CLI Usage — fetch per-model quota via Cloud Code Assist API.
+* Uses retrieveUserQuota (same endpoint as `gemini /stats`) returning
+* per-model buckets with remainingFraction + resetTime.
*/
-async function getGeminiUsage(accessToken, proxyOptions = null) {
+async function getGeminiUsage(accessToken, providerSpecificData, proxyOptions = null) {
if (!accessToken) {
return { plan: "Free", message: "Gemini CLI access token not available." };
}
try {
// Resolve project id: prefer connection-stored id, else loadCodeAssist lookup
let projectId = providerSpecificData?.projectId || null;
let plan = "Free";
if (!projectId) {
const subInfo = await getGeminiSubscriptionInfo(accessToken, proxyOptions);
projectId = subInfo?.cloudaicompanionProject || null;
plan = subInfo?.currentTier?.name || plan;
}
if (!projectId) {
return { plan, message: "Gemini CLI project ID not available." };
}
const controller = new AbortController();
const timeoutId = setTimeout(() => controller.abort(), 10000);
let response;
try {
response = await proxyAwareFetch(
"https://cloudcode-pa.googleapis.com/v1internal:retrieveUserQuota",
{
method: "POST",
headers: {
Authorization: `Bearer ${accessToken}`,
"Content-Type": "application/json",
},
body: JSON.stringify({ project: projectId }),
signal: controller.signal,
},
proxyOptions
);
} finally {
clearTimeout(timeoutId);
}
if (!response.ok) {
return { plan, message: `Gemini CLI quota error (${response.status}).` };
}
const data = await response.json();
const quotas = {};
if (Array.isArray(data.buckets)) {
for (const bucket of data.buckets) {
if (!bucket.modelId || bucket.remainingFraction == null) continue;
const remainingFraction = Number(bucket.remainingFraction) || 0;
const total = 1000; // Normalized base, matches antigravity convention
const remaining = Math.round(total * remainingFraction);
const used = Math.max(0, total - remaining);
quotas[bucket.modelId] = {
used,
total,
resetAt: parseResetTime(bucket.resetTime),
remainingPercentage: remainingFraction * 100,
unlimited: false,
};
}
}
return { plan, quotas };
} catch (error) {
return { message: `Gemini CLI error: ${error.message}` };
}
}
/**
* Get Gemini CLI subscription info via loadCodeAssist
*/
async function getGeminiSubscriptionInfo(accessToken, proxyOptions = null) {
const controller = new AbortController();
const timeoutId = setTimeout(() => controller.abort(), 10000);
try {
-// Gemini CLI uses Google Cloud quotas
-// Try to get quota info from Cloud Resource Manager
const response = await proxyAwareFetch(
"https://cloudresourcemanager.googleapis.com/v1/projects?filter=lifecycleState:ACTIVE",
"https://cloudcode-pa.googleapis.com/v1internal:loadCodeAssist",
{
method: "POST",
headers: {
Authorization: `Bearer ${accessToken}`,
Accept: "application/json",
"Content-Type": "application/json",
},
body: JSON.stringify({
metadata: {
ideType: "IDE_UNSPECIFIED",
platform: "PLATFORM_UNSPECIFIED",
pluginType: "GEMINI",
},
}),
signal: controller.signal,
},
proxyOptions
);
-if (!response.ok) {
-// Quota API may not be accessible, return generic message
-return { message: "Gemini CLI uses Google Cloud quotas. Check Google Cloud Console for details." };
-}
-return { message: "Gemini CLI connected. Usage tracked via Google Cloud Console." };
-} catch (error) {
-return { message: "Unable to fetch Gemini usage. Check Google Cloud Console." };
+if (!response.ok) return null;
+return await response.json();
+} catch {
+return null;
+} finally {
+clearTimeout(timeoutId);
+}
}
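The bucket normalization above projects remainingFraction onto a fixed 1000-unit base; a worked example with the field names the code reads:

// One retrieveUserQuota bucket → one normalized quota entry.
const bucket = { modelId: "gemini-2.5-pro", remainingFraction: 0.62, resetTime: "2026-05-06T00:00:00Z" };
// remaining = Math.round(1000 * 0.62) = 620
// used      = 1000 - 620            = 380
// quotas["gemini-2.5-pro"] = { used: 380, total: 1000, remainingPercentage: 62, ... }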
@@ -798,3 +906,206 @@ async function getOllamaUsage(accessToken, providerSpecificData) {
return { message: "Unable to fetch Ollama Cloud usage." };
}
}
/**
* GLM Coding Plan usage (international + China regions)
*/
async function getGlmUsage(apiKey, provider, proxyOptions = null) {
if (!apiKey) {
return { message: "GLM API key not available." };
}
const region = provider === "glm-cn" ? "china" : "international";
const quotaUrl = GLM_QUOTA_URLS[region];
try {
const response = await proxyAwareFetch(quotaUrl, {
headers: {
Authorization: `Bearer ${apiKey}`,
Accept: "application/json",
},
}, proxyOptions);
if (!response.ok) {
if (response.status === 401) {
return { message: "GLM API key invalid or expired." };
}
return { message: `GLM quota API error (${response.status}).` };
}
const json = await response.json();
const data = json?.data && typeof json.data === "object" ? json.data : {};
const limits = Array.isArray(data.limits) ? data.limits : [];
const quotas = {};
for (const limit of limits) {
if (!limit || limit.type !== "TOKENS_LIMIT") continue;
const usedPercent = Number(limit.percentage) || 0;
const resetMs = Number(limit.nextResetTime) || 0;
const remaining = Math.max(0, 100 - usedPercent);
quotas["session"] = {
used: usedPercent,
total: 100,
remaining,
remainingPercentage: remaining,
resetAt: resetMs > 0 ? new Date(resetMs).toISOString() : null,
unlimited: false,
};
}
const levelRaw = typeof data.level === "string" ? data.level : "";
const plan = levelRaw
? levelRaw.charAt(0).toUpperCase() + levelRaw.slice(1).toLowerCase()
: "Unknown";
return { plan, quotas };
} catch (error) {
return { message: `GLM error: ${error.message}` };
}
}
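For orientation, the response shape this parser consumes (values illustrative; field names taken from the accesses above):

// Illustrative GLM quota payload.
const json = {
  data: {
    level: "pro",
    limits: [{ type: "TOKENS_LIMIT", percentage: 37, nextResetTime: 1767589200000 }],
  },
};
// → { plan: "Pro", quotas: { session: { used: 37, total: 100, remaining: 63, remainingPercentage: 63, ... } } }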
// ── MiniMax helpers ──────────────────────────────────────────────────────
function isMiniMaxTextQuotaModel(modelName) {
const normalized = (modelName || "").trim().toLowerCase();
return normalized.startsWith("minimax-m") || normalized.startsWith("coding-plan");
}
function getMiniMaxField(model, snakeKey, camelKey) {
if (!model || typeof model !== "object") return null;
return model[snakeKey] ?? model[camelKey] ?? null;
}
function getMiniMaxSessionTotal(model) {
return Math.max(0, Number(getMiniMaxField(model, "current_interval_total_count", "currentIntervalTotalCount")) || 0);
}
function getMiniMaxWeeklyTotal(model) {
return Math.max(0, Number(getMiniMaxField(model, "current_weekly_total_count", "currentWeeklyTotalCount")) || 0);
}
function pickMiniMaxRepresentativeModel(models, getTotal) {
const withQuota = models.filter((m) => getTotal(m) > 0);
const pool = withQuota.length > 0 ? withQuota : models;
if (pool.length === 0) return null;
return pool.reduce((best, current) => (getTotal(current) > getTotal(best) ? current : best));
}
function getMiniMaxResetAt(model, capturedAtMs, remainsSnake, remainsCamel, endSnake, endCamel) {
const remainsMs = Number(getMiniMaxField(model, remainsSnake, remainsCamel)) || 0;
if (remainsMs > 0) return new Date(capturedAtMs + remainsMs).toISOString();
return parseResetTime(getMiniMaxField(model, endSnake, endCamel));
}
function buildMiniMaxQuota(total, count, resetAt, countMeansRemaining) {
const safeTotal = Math.max(0, total);
const used = countMeansRemaining ? Math.max(safeTotal - count, 0) : Math.min(Math.max(0, count), safeTotal);
const remaining = Math.max(safeTotal - used, 0);
return {
used,
total: safeTotal,
remaining,
remainingPercentage: safeTotal > 0 ? Math.max(0, Math.min(100, (remaining / safeTotal) * 100)) : 0,
resetAt,
unlimited: false,
};
}
/**
* MiniMax Token Plan / Coding Plan usage
*/
async function getMiniMaxUsage(apiKey, provider, proxyOptions = null) {
if (!apiKey) {
return { message: "MiniMax API key not available." };
}
const usageUrls = MINIMAX_USAGE_URLS[provider] || [];
let lastErrorMessage = "";
for (let index = 0; index < usageUrls.length; index += 1) {
const usageUrl = usageUrls[index];
const canFallback = index < usageUrls.length - 1;
try {
const response = await proxyAwareFetch(usageUrl, {
method: "GET",
headers: {
Authorization: `Bearer ${apiKey}`,
Accept: "application/json",
"Content-Type": "application/json",
},
}, proxyOptions);
const rawText = await response.text();
let payload = {};
if (rawText) {
try { payload = JSON.parse(rawText); } catch { payload = {}; }
}
const baseResp = (payload?.base_resp ?? payload?.baseResp) || {};
const apiStatusCode = Number(baseResp.status_code ?? baseResp.statusCode) || 0;
const apiStatusMessage = String(baseResp.status_msg ?? baseResp.statusMsg ?? "").trim();
const combined = `${apiStatusMessage} ${rawText}`.trim();
const authLike = /token plan|coding plan|invalid api key|invalid key|unauthorized|inactive/i;
if (response.status === 401 || response.status === 403 || apiStatusCode === 1004 || authLike.test(combined)) {
return { message: "MiniMax API key invalid or inactive. Use an active Token/Coding Plan key." };
}
if (!response.ok) {
lastErrorMessage = `MiniMax usage endpoint error (${response.status})`;
if ((response.status === 404 || response.status === 405 || response.status >= 500) && canFallback) continue;
return { message: `MiniMax connected. ${lastErrorMessage}` };
}
if (apiStatusCode !== 0) {
return { message: `MiniMax connected. ${apiStatusMessage || "Upstream quota API error"}` };
}
const modelRemains = payload?.model_remains ?? payload?.modelRemains;
const allModels = Array.isArray(modelRemains) ? modelRemains : [];
const textModels = allModels.filter((m) => isMiniMaxTextQuotaModel(String(getMiniMaxField(m, "model_name", "modelName"))));
if (textModels.length === 0) {
return { message: "MiniMax connected. No text quota data was returned." };
}
const capturedAtMs = Date.now();
const countMeansRemaining = usageUrl.includes("/coding_plan/remains");
const quotas = {};
const sessionModel = pickMiniMaxRepresentativeModel(textModels, getMiniMaxSessionTotal);
if (sessionModel) {
const total = getMiniMaxSessionTotal(sessionModel);
const count = Math.max(0, Number(getMiniMaxField(sessionModel, "current_interval_usage_count", "currentIntervalUsageCount")) || 0);
quotas["session (5h)"] = buildMiniMaxQuota(
total, count,
getMiniMaxResetAt(sessionModel, capturedAtMs, "remains_time", "remainsTime", "end_time", "endTime"),
countMeansRemaining
);
}
const weeklyModel = pickMiniMaxRepresentativeModel(textModels, getMiniMaxWeeklyTotal);
if (weeklyModel && getMiniMaxWeeklyTotal(weeklyModel) > 0) {
const total = getMiniMaxWeeklyTotal(weeklyModel);
const count = Math.max(0, Number(getMiniMaxField(weeklyModel, "current_weekly_usage_count", "currentWeeklyUsageCount")) || 0);
quotas["weekly (7d)"] = buildMiniMaxQuota(
total, count,
getMiniMaxResetAt(weeklyModel, capturedAtMs, "weekly_remains_time", "weeklyRemainsTime", "weekly_end_time", "weeklyEndTime"),
countMeansRemaining
);
}
if (Object.keys(quotas).length === 0) {
return { message: "MiniMax connected. Unable to extract quota usage." };
}
return { quotas };
} catch (error) {
lastErrorMessage = error.message;
if (!canFallback) break;
}
}
return { message: lastErrorMessage ? `MiniMax connected. Unable to fetch usage: ${lastErrorMessage}` : "MiniMax connected. Unable to fetch usage." };
}
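The countMeansRemaining flag flips how the count argument is interpreted; a worked pair of calls:

// coding_plan endpoints report a *remaining* count:
buildMiniMaxQuota(100, 80, null, true);   // → used 20, remaining 80, remainingPercentage 80
// token_plan endpoints report a *usage* count:
buildMiniMaxQuota(100, 80, null, false);  // → used 80, remaining 20, remainingPercentage 20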

View File

@@ -16,8 +16,10 @@ const MODEL_RULES = [
];
function shouldInject(message, scope) {
if (message?.role !== "assistant" || "reasoning_content" in message) return false;
if (scope === "toolCalls") return Array.isArray(message.tool_calls);
if (message?.role !== "assistant") return false;
const rc = message.reasoning_content;
if (typeof rc === "string" && rc.length > 0) return false;
if (scope === "toolCalls") return Array.isArray(message.tool_calls) && message.tool_calls.length > 0;
return true;
}
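The effect of the tightened checks, illustrated (the non-toolCalls scope value is arbitrary here):

shouldInject({ role: "assistant", reasoning_content: "thought" }, "all");  // false — non-empty reasoning blocks injection
shouldInject({ role: "assistant", reasoning_content: "" }, "all");         // true — empty string no longer blocks (old code returned false)
shouldInject({ role: "assistant", tool_calls: [] }, "toolCalls");          // false — empty tool_calls no longer qualifies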