diff --git a/open-sse/config/providerModels.js b/open-sse/config/providerModels.js
index b80c3b1c..b913360a 100644
--- a/open-sse/config/providerModels.js
+++ b/open-sse/config/providerModels.js
@@ -141,6 +141,18 @@ export const PROVIDER_MODELS = {
     { id: "deepseek/deepseek-chat", name: "DeepSeek Chat" },
     { id: "deepseek/deepseek-reasoner", name: "DeepSeek Reasoner" },
   ],
+  "opencode-go": [ // OpenCode Go subscription (API key)
+    { id: "kimi-k2.6", name: "Kimi K2.6" },
+    { id: "kimi-k2.5", name: "Kimi K2.5" },
+    { id: "glm-5.1", name: "GLM 5.1" },
+    { id: "glm-5", name: "GLM 5" },
+    { id: "qwen3.5-plus", name: "Qwen 3.5 Plus" },
+    { id: "qwen3.6-plus", name: "Qwen 3.6 Plus" },
+    { id: "mimo-v2-pro", name: "MiMo V2 Pro" },
+    { id: "mimo-v2-omni", name: "MiMo V2 Omni" },
+    { id: "minimax-m2.7", name: "MiniMax M2.7", targetFormat: "claude" },
+    { id: "minimax-m2.5", name: "MiniMax M2.5", targetFormat: "claude" },
+  ],
   oc: [ // OpenCode
     // { id: "nemotron-3-super-free", name: "Nemotron 3 Super" },
     // { id: "qwen3.6-plus-free", name: "Qwen 3.6 Plus" },
@@ -192,6 +204,10 @@ export const PROVIDER_MODELS = {
     { id: "tts-1", name: "TTS-1", type: "tts" },
     { id: "tts-1-hd", name: "TTS-1 HD", type: "tts" },
     { id: "gpt-4o-mini-tts", name: "GPT-4o Mini TTS", type: "tts" },
+    // Image models
+    { id: "gpt-image-1", name: "GPT Image 1", type: "image" },
+    { id: "dall-e-3", name: "DALL-E 3", type: "image" },
+    { id: "dall-e-2", name: "DALL-E 2", type: "image" },
   ],
   anthropic: [
     { id: "claude-sonnet-4-20250514", name: "Claude Sonnet 4" },
@@ -219,6 +235,10 @@ export const PROVIDER_MODELS = {
     { id: "gemini-embedding-001", name: "Gemini Embedding 001", type: "embedding" },
     { id: "text-embedding-005", name: "Text Embedding 005", type: "embedding" },
     { id: "text-embedding-004", name: "Text Embedding 004 (Legacy)", type: "embedding" },
+    // Image models (Nano Banana)
+    { id: "gemini-3.1-flash-image-preview", name: "Gemini 3.1 Flash Image (Nano Banana 2)", type: "image" },
+    { id: "gemini-3-pro-image-preview", name: "Gemini 3 Pro Image (Nano Banana Pro)", type: "image" },
+    { id: "gemini-2.5-flash-image", name: "Gemini 2.5 Flash Image (Nano Banana)", type: "image" },
   ],
   openrouter: [
     // Embedding models
@@ -233,6 +253,11 @@ export const PROVIDER_MODELS = {
     { id: "openai/gpt-4o-mini-tts", name: "GPT-4o Mini TTS", type: "tts" },
     { id: "openai/tts-1-hd", name: "TTS-1 HD", type: "tts" },
     { id: "openai/tts-1", name: "TTS-1", type: "tts" },
+    // Image models
+    { id: "openai/dall-e-3", name: "DALL-E 3 (via OpenRouter)", type: "image" },
+    { id: "openai/gpt-image-1", name: "GPT Image 1 (via OpenRouter)", type: "image" },
+    { id: "google/imagen-3.0-generate-002", name: "Imagen 3 (via OpenRouter)", type: "image" },
+    { id: "black-forest-labs/FLUX.1-schnell", name: "FLUX.1 Schnell (via OpenRouter)", type: "image" },
   ],
   glm: [
     { id: "glm-5.1", name: "GLM 5.1" },
@@ -256,6 +281,8 @@ export const PROVIDER_MODELS = {
     { id: "MiniMax-M2.7", name: "MiniMax M2.7" },
     { id: "MiniMax-M2.5", name: "MiniMax M2.5" },
     { id: "MiniMax-M2.1", name: "MiniMax M2.1" },
+    // Image models
+    { id: "minimax-image-01", name: "MiniMax Image 01", type: "image" },
   ],
   blackbox: [
     { id: "gpt-4o", name: "GPT-4o" },
@@ -424,6 +451,24 @@ export const PROVIDER_MODELS = {

   // TTS entries are loaded from ttsModels.js via buildTtsProviderModels()
   ...buildTtsProviderModels(),
+
+  // Image providers
+  nanobanana: [
+    { id: "nanobanana-flash", name: "NanoBanana Flash", type: "image" },
+    { id: "nanobanana-pro", name: "NanoBanana Pro", type: "image" },
+  ],
+  sdwebui: [
+    { id: "stable-diffusion-v1-5", name: "Stable Diffusion v1.5", type: "image" },
+    { id: "sdxl-base-1.0", name: "SDXL Base 1.0", type: "image" },
+  ],
+  comfyui: [
+    { id: "flux-dev", name: "FLUX Dev", type: "image" },
+    { id: "sdxl", name: "SDXL", type: "image" },
+  ],
+  huggingface: [
+    { id: "black-forest-labs/FLUX.1-schnell", name: "FLUX.1 Schnell", type: "image" },
+    { id: "stabilityai/stable-diffusion-xl-base-1.0", name: "SDXL Base 1.0", type: "image" },
+  ],
 };

 // Helper functions
diff --git a/open-sse/config/providers.js b/open-sse/config/providers.js
index c7c9ee82..3c5dabdc 100644
--- a/open-sse/config/providers.js
+++ b/open-sse/config/providers.js
@@ -337,6 +337,11 @@ export const PROVIDERS = {
     headers: { "x-opencode-client": "desktop" },
     noAuth: true
   },
+  "opencode-go": {
+    baseUrl: "https://opencode.ai/zen/go/v1/chat/completions",
+    format: "openai",
+    headers: {}
+  },
   "grok-web": {
     baseUrl: "https://grok.com/rest/app-chat/conversations/new",
     format: "grok-web",
diff --git a/open-sse/executors/index.js b/open-sse/executors/index.js
index f1e175f4..b7ca8a15 100644
--- a/open-sse/executors/index.js
+++ b/open-sse/executors/index.js
@@ -9,6 +9,7 @@ import { CursorExecutor } from "./cursor.js";
 import { VertexExecutor } from "./vertex.js";
 import { QwenExecutor } from "./qwen.js";
 import { OpenCodeExecutor } from "./opencode.js";
+import { OpenCodeGoExecutor } from "./opencode-go.js";
 import { GrokWebExecutor } from "./grok-web.js";
 import { PerplexityWebExecutor } from "./perplexity-web.js";
 import { DefaultExecutor } from "./default.js";
@@ -27,6 +28,7 @@ const executors = {
   "vertex-partner": new VertexExecutor("vertex-partner"),
   qwen: new QwenExecutor(),
   opencode: new OpenCodeExecutor(),
+  "opencode-go": new OpenCodeGoExecutor(),
   "grok-web": new GrokWebExecutor(),
   "perplexity-web": new PerplexityWebExecutor(),
 };
@@ -56,5 +58,6 @@ export { VertexExecutor } from "./vertex.js";
 export { DefaultExecutor } from "./default.js";
 export { QwenExecutor } from "./qwen.js";
 export { OpenCodeExecutor } from "./opencode.js";
+export { OpenCodeGoExecutor } from "./opencode-go.js";
 export { GrokWebExecutor } from "./grok-web.js";
 export { PerplexityWebExecutor } from "./perplexity-web.js";
diff --git a/open-sse/executors/opencode-go.js b/open-sse/executors/opencode-go.js
new file mode 100644
index 00000000..c75a4724
--- /dev/null
+++ b/open-sse/executors/opencode-go.js
@@ -0,0 +1,51 @@
+import { BaseExecutor } from "./base.js";
+import { PROVIDERS } from "../config/providers.js";
+
+// Models that use /zen/go/v1/messages (Anthropic/Claude format + x-api-key auth)
+const CLAUDE_FORMAT_MODELS = new Set(["minimax-m2.5", "minimax-m2.7"]);
+
+const BASE = "https://opencode.ai/zen/go/v1";
+
+// Kimi (Moonshot) requires reasoning_content on assistant tool_call messages when thinking is on.
+// OpenAI-format clients don't send it -> upstream 400. Inject a non-empty placeholder.
+const KIMI_REASONING_PLACEHOLDER = " ";
+
+export class OpenCodeGoExecutor extends BaseExecutor {
+  constructor() {
+    super("opencode-go", PROVIDERS["opencode-go"]);
+  }
+
+  // buildUrl runs before buildHeaders in BaseExecutor.execute, so cache the model here
+  buildUrl(model) {
+    this._lastModel = model;
+    return CLAUDE_FORMAT_MODELS.has(model)
+      ? `${BASE}/messages`
+      : `${BASE}/chat/completions`;
+  }
+
+  buildHeaders(credentials, stream = true) {
+    const key = credentials?.apiKey || credentials?.accessToken;
+    const headers = { "Content-Type": "application/json" };
+
+    if (CLAUDE_FORMAT_MODELS.has(this._lastModel)) {
+      headers["x-api-key"] = key;
+      headers["anthropic-version"] = "2023-06-01";
+    } else {
+      headers["Authorization"] = `Bearer ${key}`;
+    }
+
+    if (stream) headers["Accept"] = "text/event-stream";
+    return headers;
+  }
+
+  transformRequest(model, body) {
+    if (!model?.startsWith?.("kimi-") || !body?.messages) return body;
+    const messages = body.messages.map(m => {
+      if (m?.role === "assistant" && Array.isArray(m.tool_calls) && !("reasoning_content" in m)) {
+        return { ...m, reasoning_content: KIMI_REASONING_PLACEHOLDER };
+      }
+      return m;
+    });
+    return { ...body, messages };
+  }
+}
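A minimal sketch of what the new executor's Kimi workaround does to a conversation history. The import path assumes execution from the repo root, and the message contents are made up for illustration:

```js
import { OpenCodeGoExecutor } from "./open-sse/executors/opencode-go.js";

const executor = new OpenCodeGoExecutor();

// An OpenAI-format history where the assistant turn carries tool_calls but no
// reasoning_content (the shape Kimi rejects upstream with a 400).
const body = {
  messages: [
    { role: "user", content: "What's the weather in Berlin?" },
    {
      role: "assistant",
      content: null,
      tool_calls: [{ id: "call_1", type: "function", function: { name: "get_weather", arguments: "{}" } }],
    },
    { role: "tool", tool_call_id: "call_1", content: "{\"temp_c\":18}" },
  ],
};

const out = executor.transformRequest("kimi-k2.6", body);
console.log(out.messages[1].reasoning_content);              // " " - placeholder injected
console.log(executor.transformRequest("glm-5", body) === body); // true - non-Kimi bodies pass through untouched
```

The placeholder is a single space rather than an empty string because, per the comment in the executor, the upstream check demands a non-empty value.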
diff --git a/open-sse/handlers/imageGenerationCore.js b/open-sse/handlers/imageGenerationCore.js
new file mode 100644
index 00000000..2189f5f7
--- /dev/null
+++ b/open-sse/handlers/imageGenerationCore.js
@@ -0,0 +1,320 @@
+import { createErrorResult, parseUpstreamError, formatProviderError } from "../utils/error.js";
+import { HTTP_STATUS } from "../config/runtimeConfig.js";
+import { refreshWithRetry } from "../services/tokenRefresh.js";
+import { getExecutor } from "../executors/index.js";
+
+// Image provider configurations
+const IMAGE_PROVIDERS = {
+  openai: {
+    baseUrl: "https://api.openai.com/v1/images/generations",
+    format: "openai",
+  },
+  gemini: {
+    baseUrl: "https://generativelanguage.googleapis.com/v1beta/models",
+    format: "gemini",
+  },
+  minimax: {
+    baseUrl: "https://api.minimaxi.com/v1/images/generations",
+    format: "openai",
+  },
+  openrouter: {
+    baseUrl: "https://openrouter.ai/api/v1/images/generations",
+    format: "openai",
+  },
+  nanobanana: {
+    baseUrl: "https://api.nanobananaapi.ai/api/v1/nanobanana/generate",
+    format: "nanobanana",
+  },
+  sdwebui: {
+    baseUrl: "http://localhost:7860/sdapi/v1/txt2img",
+    format: "sdwebui",
+  },
+  comfyui: {
+    baseUrl: "http://localhost:8188",
+    format: "comfyui",
+  },
+  huggingface: {
+    baseUrl: "https://api-inference.huggingface.co/models",
+    format: "huggingface",
+  },
+};
+
+/**
+ * Build image generation URL
+ */
+function buildImageUrl(provider, model, credentials) {
+  const config = IMAGE_PROVIDERS[provider];
+  if (!config) return null;
+
+  switch (provider) {
+    case "gemini": {
+      const apiKey = credentials?.apiKey || credentials?.accessToken;
+      const modelId = model.replace(/^models\//, "");
+      return `${config.baseUrl}/${modelId}:generateContent?key=${encodeURIComponent(apiKey)}`;
+    }
+    case "huggingface":
+      return `${config.baseUrl}/${model}`;
+    default:
+      return config.baseUrl;
+  }
+}
+
+/**
+ * Build request headers
+ */
+function buildImageHeaders(provider, credentials) {
+  const headers = { "Content-Type": "application/json" };
+
+  if (provider === "gemini") {
+    return headers;
+  }
+
+  if (provider === "openrouter") {
+    headers["Authorization"] = `Bearer ${credentials?.apiKey || credentials?.accessToken}`;
+    headers["HTTP-Referer"] = "https://endpoint-proxy.local";
+    headers["X-Title"] = "Endpoint Proxy";
+    return headers;
+  }
+
+  if (provider === "huggingface") {
+    headers["Authorization"] = `Bearer ${credentials?.apiKey || credentials?.accessToken}`;
+    return headers;
+  }
+
+  if (credentials?.apiKey || credentials?.accessToken) {
+    headers["Authorization"] = `Bearer ${credentials.apiKey || credentials.accessToken}`;
+  }
+
+  return headers;
+}
+
+/**
+ * Build request body based on provider format
+ */
+function buildImageBody(provider, model, body) {
+  const { prompt, n = 1, size = "1024x1024", quality, style, response_format } = body;
+
+  switch (provider) {
+    case "gemini":
+      return {
+        contents: [{ parts: [{ text: prompt }] }],
+        generationConfig: {
+          responseModalities: ["TEXT", "IMAGE"],
+        },
+      };
+
+    case "sdwebui": {
+      const [width, height] = size.split("x").map(Number);
+      return {
+        prompt,
+        width: width || 512,
+        height: height || 512,
+        steps: 20,
+        batch_size: n,
+      };
+    }
+
+    case "nanobanana": {
+      const sizeMap = {
+        "1024x1024": "1:1",
+        "1024x1792": "9:16",
+        "1792x1024": "16:9",
+      };
+      return {
+        prompt,
+        type: "TEXTTOIAMGE",
+        numImages: n,
+        image_size: sizeMap[size] || "1:1",
+      };
+    }
+
+    default: {
+      // OpenAI-compatible format
+      const requestBody = { model, prompt, n, size };
+      if (quality) requestBody.quality = quality;
+      if (style) requestBody.style = style;
+      if (response_format) requestBody.response_format = response_format;
+      return requestBody;
+    }
+  }
+}
+
+/**
+ * Normalize response to OpenAI format
+ */
+function normalizeImageResponse(responseBody, provider, prompt) {
+  // Already in OpenAI format
+  if (responseBody.created && Array.isArray(responseBody.data)) {
+    return responseBody;
+  }
+
+  const timestamp = Math.floor(Date.now() / 1000);
+
+  switch (provider) {
+    case "gemini": {
+      const parts = responseBody.candidates?.[0]?.content?.parts || [];
+      const images = parts
+        .filter((p) => p.inlineData?.data)
+        .map((p) => ({ b64_json: p.inlineData.data }));
+      return {
+        created: timestamp,
+        data: images.length > 0 ? images : [{ b64_json: "", revised_prompt: prompt }],
+      };
+    }
+
+    case "sdwebui": {
+      const images = Array.isArray(responseBody.images)
+        ? responseBody.images.map((img) => ({ b64_json: img }))
+        : [];
+      return { created: timestamp, data: images };
+    }
+
+    case "nanobanana": {
+      if (responseBody.image) {
+        return {
+          created: timestamp,
+          data: [{ b64_json: responseBody.image, revised_prompt: prompt }],
+        };
+      }
+      return { created: timestamp, data: [] };
+    }
+
+    case "huggingface": {
+      // HuggingFace returns binary image data
+      return responseBody;
+    }
+
+    default:
+      return responseBody;
+  }
+}
+
+/**
+ * Core image generation handler
+ * @param {object} options
+ * @param {object} options.body - Request body { model, prompt, n, size, ... }
+ * @param {object} options.modelInfo - { provider, model }
+ * @param {object} options.credentials - Provider credentials
+ * @param {object} [options.log] - Logger
+ * @param {function} [options.onCredentialsRefreshed] - Called when creds are refreshed
+ * @param {function} [options.onRequestSuccess] - Called on success
+ * @returns {Promise<{ success: boolean, response: Response, status?: number, error?: string }>}
+ */
+export async function handleImageGenerationCore({
+  body,
+  modelInfo,
+  credentials,
+  log,
+  onCredentialsRefreshed,
+  onRequestSuccess,
+}) {
+  const { provider, model } = modelInfo;
+
+  if (!body.prompt) {
+    return createErrorResult(HTTP_STATUS.BAD_REQUEST, "Missing required field: prompt");
+  }
+
+  const url = buildImageUrl(provider, model, credentials);
+  if (!url) {
+    return createErrorResult(
+      HTTP_STATUS.BAD_REQUEST,
+      `Provider '${provider}' does not support image generation`
+    );
+  }
+
+  const headers = buildImageHeaders(provider, credentials);
+  const requestBody = buildImageBody(provider, model, body);
+
+  log?.debug?.("IMAGE", `${provider.toUpperCase()} | ${model} | prompt="${body.prompt.slice(0, 50)}..."`);
+
+  let providerResponse;
+  try {
+    providerResponse = await fetch(url, {
+      method: "POST",
+      headers,
+      body: JSON.stringify(requestBody),
+    });
+  } catch (error) {
+    const errMsg = formatProviderError(error, provider, model, HTTP_STATUS.BAD_GATEWAY);
+    log?.debug?.("IMAGE", `Fetch error: ${errMsg}`);
+    return createErrorResult(HTTP_STATUS.BAD_GATEWAY, errMsg);
+  }
+
+  // Handle 401/403 — try token refresh
+  const executor = getExecutor(provider);
+  if (
+    !executor?.noAuth &&
+    (providerResponse.status === HTTP_STATUS.UNAUTHORIZED ||
+      providerResponse.status === HTTP_STATUS.FORBIDDEN)
+  ) {
+    const newCredentials = await refreshWithRetry(
+      () => executor.refreshCredentials(credentials, log),
+      3,
+      log
+    );
+
+    if (newCredentials?.accessToken || newCredentials?.apiKey) {
+      log?.info?.("TOKEN", `${provider.toUpperCase()} | refreshed for image generation`);
+      Object.assign(credentials, newCredentials);
+      if (onCredentialsRefreshed && newCredentials) {
+        await onCredentialsRefreshed(newCredentials);
+      }
+
+      try {
+        const retryHeaders = buildImageHeaders(provider, credentials);
+        const retryUrl = provider === "gemini" ? buildImageUrl(provider, model, credentials) : url;
+
+        providerResponse = await fetch(retryUrl, {
+          method: "POST",
+          headers: retryHeaders,
+          body: JSON.stringify(requestBody),
+        });
+      } catch (retryError) {
+        log?.warn?.("TOKEN", `${provider.toUpperCase()} | retry after refresh failed`);
+      }
+    } else {
+      log?.warn?.("TOKEN", `${provider.toUpperCase()} | refresh failed`);
+    }
+  }
+
+  if (!providerResponse.ok) {
+    const { statusCode, message } = await parseUpstreamError(providerResponse);
+    const errMsg = formatProviderError(new Error(message), provider, model, statusCode);
+    log?.debug?.("IMAGE", `Provider error: ${errMsg}`);
+    return createErrorResult(statusCode, errMsg);
+  }
+
+  let responseBody;
+  try {
+    // HuggingFace returns binary image data
+    if (provider === "huggingface") {
+      const buffer = await providerResponse.arrayBuffer();
+      const base64 = Buffer.from(buffer).toString("base64");
+      responseBody = {
+        created: Math.floor(Date.now() / 1000),
+        data: [{ b64_json: base64 }],
+      };
+    } else {
+      responseBody = await providerResponse.json();
+    }
+  } catch (parseError) {
+    return createErrorResult(HTTP_STATUS.BAD_GATEWAY, `Invalid response from ${provider}`);
+  }
+
+  if (onRequestSuccess) {
+    await onRequestSuccess();
+  }
+
+  const normalized = normalizeImageResponse(responseBody, provider, body.prompt);
+
+  log?.debug?.("IMAGE", `Success | images=${normalized.data?.length || 0}`);
+
+  return {
+    success: true,
+    response: new Response(JSON.stringify(normalized), {
+      headers: {
+        "Content-Type": "application/json",
+        "Access-Control-Allow-Origin": "*",
+      },
+    }),
+  };
+}
diff --git a/open-sse/services/model.js b/open-sse/services/model.js
index 1b3d96ba..ebe8d90b 100644
--- a/open-sse/services/model.js
+++ b/open-sse/services/model.js
@@ -13,6 +13,7 @@ const ALIAS_TO_PROVIDER_ID = {
   kmc: "kimi-coding",
   cl: "cline",
   oc: "opencode",
+  ocg: "opencode-go",
   // TTS providers
   el: "elevenlabs",
   // API Key providers
diff --git a/public/providers/opencode-go.png b/public/providers/opencode-go.png
new file mode 100644
index 00000000..2e709e1c
Binary files /dev/null and b/public/providers/opencode-go.png differ
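For reference, a hypothetical direct invocation of the core handler above (in the app it is reached through the images route added later in this diff). Run as an ES module; the provider, model, and environment variable are placeholders:

```js
import { handleImageGenerationCore } from "./open-sse/handlers/imageGenerationCore.js";

const result = await handleImageGenerationCore({
  body: { prompt: "A lighthouse at dusk", n: 1, size: "1024x1024" },
  modelInfo: { provider: "openai", model: "dall-e-3" },
  credentials: { apiKey: process.env.OPENAI_API_KEY },
  log: console, // every log call is optional-chained, so plain console works
});

if (result.success) {
  // Responses are always normalized to the OpenAI shape: { created, data: [...] }
  const { data } = await result.response.json();
  console.log(data[0].url ?? "(base64 image)");
} else {
  console.error(result.status, result.error);
}
```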
diff --git a/src/app/(dashboard)/dashboard/media-providers/[kind]/[id]/page.js b/src/app/(dashboard)/dashboard/media-providers/[kind]/[id]/page.js
index e4d52276..ef8fd0cf 100644
--- a/src/app/(dashboard)/dashboard/media-providers/[kind]/[id]/page.js
+++ b/src/app/(dashboard)/dashboard/media-providers/[kind]/[id]/page.js
@@ -823,6 +823,10 @@ function GenericExampleCard({ providerId, kind }) {
   const exConfig = KIND_EXAMPLE_CONFIG[kind];
   if (!kindConfig || !exConfig) return null;

+  // Get models for this kind (e.g., type="image")
+  const kindModels = getModelsByProviderId(providerId).filter((m) => m.type === kind);
+  const [selectedModel, setSelectedModel] = useState(kindModels[0]?.id ?? "");
+
   const [input, setInput] = useState(exConfig.defaultInput);
   const [apiKey, setApiKey] = useState("");
   const [useTunnel, setUseTunnel] = useState(false);
@@ -848,9 +852,10 @@ function GenericExampleCard({ providerId, kind }) {
   const endpoint = useTunnel ? tunnelEndpoint : localEndpoint;
   const apiPath = kindConfig.endpoint.path;

+  const modelFull = selectedModel ? `${providerAlias}/${selectedModel}` : "";
   const requestBody = {
-    model: `${providerAlias}/model-name`,
+    model: modelFull,
     [exConfig.bodyKey]: input,
     ...exConfig.extraBody,
   };
@@ -861,7 +866,7 @@ function GenericExampleCard({ providerId, kind }) {
   -d '${JSON.stringify(requestBody)}'`;

   const handleRun = async () => {
-    if (!input.trim()) return;
+    if (!input.trim() || !modelFull) return;
     setRunning(true);
     setError("");
     setResult(null);
@@ -869,7 +874,7 @@ function GenericExampleCard({ providerId, kind }) {
     try {
       const headers = { "Content-Type": "application/json" };
       if (apiKey) headers["Authorization"] = `Bearer ${apiKey}`;
-      const body = { ...requestBody, model: `${providerAlias}/model-name` };
+      const body = { ...requestBody, model: modelFull };
       const res = await fetch(`/api${apiPath}`, {
         method: kindConfig.endpoint.method,
         headers,
@@ -892,6 +897,21 @@ function GenericExampleCard({ providerId, kind }) {

         Example

+      {/* Model selector - only show if models available */}
+      {kindModels.length > 0 && (
+
+
+
+      )}
+
       {/* Endpoint */}
@@ -953,11 +973,11 @@ function GenericExampleCard({ providerId, kind }) {
             {copiedCurl ? "check" : "content_copy"}
             {copiedCurl ? "Copied" : "Copy"}
-
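A hedged sketch of the request the example card now emits for an image provider. The path follows from the route file added below (src/app/api/v1/images/generations/route.js under the Next.js app router); the port, API key, and the assumption that exConfig.bodyKey is "prompt" for kind="image" are illustrative:

```js
const res = await fetch("http://localhost:3000/api/v1/images/generations", {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    Authorization: "Bearer sk-example", // only needed when requireApiKey is enabled
  },
  body: JSON.stringify({
    model: "openai/dall-e-3",   // `${providerAlias}/${selectedModel}`, replacing the old "model-name" placeholder
    prompt: "A watercolor fox", // assumes bodyKey === "prompt" for the image kind
  }),
});
console.log(await res.json());
```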
diff --git a/src/app/(dashboard)/dashboard/providers/components/ModelsCard.js b/src/app/(dashboard)/dashboard/providers/components/ModelsCard.js
index 5cb32e6e..a9f66d2d 100644
--- a/src/app/(dashboard)/dashboard/providers/components/ModelsCard.js
+++ b/src/app/(dashboard)/dashboard/providers/components/ModelsCard.js
@@ -111,6 +111,7 @@ AddCustomModelModal.propTypes = {
 export default function ModelsCard({ providerId, kindFilter }) {
   const { copied, copy } = useCopyToClipboard();
   const [modelAliases, setModelAliases] = useState({});
+  const [customModels, setCustomModels] = useState([]);
   const [modelTestResults, setModelTestResults] = useState({});
   const [testingModelId, setTestingModelId] = useState(null);
   const [testError, setTestError] = useState("");
@@ -118,17 +119,21 @@ export default function ModelsCard({ providerId, kindFilter }) {
   const [connections, setConnections] = useState([]);

   const providerAlias = getProviderAlias(providerId);
+  const effectiveType = kindFilter || "llm";

   const fetchData = useCallback(async () => {
     try {
-      const [aliasRes, connRes] = await Promise.all([
+      const [aliasRes, connRes, customRes] = await Promise.all([
         fetch("/api/models/alias"),
         fetch("/api/providers", { cache: "no-store" }),
+        fetch("/api/models/custom", { cache: "no-store" }),
       ]);
       const aliasData = await aliasRes.json();
       const connData = await connRes.json();
+      const customData = await customRes.json();
       if (aliasRes.ok) setModelAliases(aliasData.aliases || {});
       if (connRes.ok) setConnections((connData.connections || []).filter((c) => c.provider === providerId));
+      if (customRes.ok) setCustomModels(customData.models || []);
     } catch (e) { console.log("ModelsCard fetch error:", e); }
   }, [providerId]);
@@ -153,6 +158,25 @@ export default function ModelsCard({ providerId, kindFilter }) {
     } catch (e) { console.log("delete alias error:", e); }
   };

+  const handleAddCustomModel = async (modelId) => {
+    try {
+      const res = await fetch("/api/models/custom", {
+        method: "POST",
+        headers: { "Content-Type": "application/json" },
+        body: JSON.stringify({ providerAlias, id: modelId, type: effectiveType }),
+      });
+      if (res.ok) await fetchData();
+    } catch (e) { console.log("add custom model error:", e); }
+  };
+
+  const handleDeleteCustomModel = async (modelId) => {
+    try {
+      const params = new URLSearchParams({ providerAlias, id: modelId, type: effectiveType });
+      const res = await fetch(`/api/models/custom?${params}`, { method: "DELETE" });
+      if (res.ok) await fetchData();
+    } catch (e) { console.log("delete custom model error:", e); }
+  };
+
   const handleTestModel = async (modelId) => {
     if (testingModelId) return;
     setTestingModelId(modelId);
@@ -171,28 +195,23 @@ export default function ModelsCard({ providerId, kindFilter }) {
     } finally { setTestingModelId(null); }
   };

-  // Get models — filter by kindFilter if provided
-  const allModels = getModelsByProviderId(providerId);
-  const displayModels = kindFilter
-    ? allModels.filter((m) => {
+  // Built-in models — filter by kindFilter if provided
+  const allBuiltIn = getModelsByProviderId(providerId);
+  const builtInModels = kindFilter
+    ? allBuiltIn.filter((m) => {
         if (m.kinds) return m.kinds.includes(kindFilter);
-        if (m.type) return m.type === kindFilter;
-        return kindFilter === "llm";
+        return (m.type || "llm") === kindFilter;
       })
-    : allModels;
+    : allBuiltIn;

-  // Custom models added via alias
-  const customModels = Object.entries(modelAliases)
-    .filter(([alias, fullModel]) => {
-      const prefix = `${providerAlias}/`;
-      if (!fullModel.startsWith(prefix)) return false;
-      const modelId = fullModel.slice(prefix.length);
-      return !displayModels.some((m) => m.id === modelId) && alias === modelId;
-    })
-    .map(([alias, fullModel]) => ({
-      id: fullModel.slice(`${providerAlias}/`.length),
-      alias,
-    }));
+  // Custom models for this provider + kind, dedupe vs built-in
+  const myCustomModels = customModels.filter(
+    (m) => m.providerAlias === providerAlias
+      && (m.type || "llm") === effectiveType
+      && !builtInModels.some((b) => b.id === m.id)
+  );
+
+  const displayModels = builtInModels;

   return (
     <>
@@ -224,16 +243,15 @@ export default function ModelsCard({ providerId, kindFilter }) {
           );
         })}
-        {customModels.map((model) => (
+        {myCustomModels.map((model) => (
             {}}
-            onDeleteAlias={() => handleDeleteAlias(model.alias)}
+            onDeleteAlias={() => handleDeleteCustomModel(model.id)}
             testStatus={modelTestResults[model.id]}
            onTest={connections.length > 0 ? () => handleTestModel(model.id) : undefined}
            isTesting={testingModelId === model.id}
@@ -254,7 +272,7 @@ export default function ModelsCard({ providerId, kindFilter }) {
       {
-          await handleSetAlias(modelId, modelId);
+          await handleAddCustomModel(modelId);
           setShowAddCustomModel(false);
         }}
         onClose={() => setShowAddCustomModel(false)}
diff --git a/src/app/api/models/custom/route.js b/src/app/api/models/custom/route.js
new file mode 100644
index 00000000..76e14c96
--- /dev/null
+++ b/src/app/api/models/custom/route.js
@@ -0,0 +1,48 @@
+import { NextResponse } from "next/server";
+import { getCustomModels, addCustomModel, deleteCustomModel } from "@/models";
+
+export const dynamic = "force-dynamic";
+
+// GET /api/models/custom - List all custom models
+export async function GET() {
+  try {
+    const models = await getCustomModels();
+    return NextResponse.json({ models });
+  } catch (error) {
+    console.log("Error fetching custom models:", error);
+    return NextResponse.json({ error: "Failed to fetch custom models" }, { status: 500 });
+  }
+}
+
+// POST /api/models/custom - Add custom model
+export async function POST(request) {
+  try {
+    const { providerAlias, id, type, name } = await request.json();
+    if (!providerAlias || !id) {
+      return NextResponse.json({ error: "providerAlias and id required" }, { status: 400 });
+    }
+    const added = await addCustomModel({ providerAlias, id, type: type || "llm", name });
+    return NextResponse.json({ success: true, added });
+  } catch (error) {
+    console.log("Error adding custom model:", error);
+    return NextResponse.json({ error: "Failed to add custom model" }, { status: 500 });
+  }
+}
+
+// DELETE /api/models/custom?providerAlias=xxx&id=yyy&type=zzz
+export async function DELETE(request) {
+  try {
+    const { searchParams } = new URL(request.url);
+    const providerAlias = searchParams.get("providerAlias");
+    const id = searchParams.get("id");
+    const type = searchParams.get("type") || "llm";
+    if (!providerAlias || !id) {
+      return NextResponse.json({ error: "providerAlias and id required" }, { status: 400 });
+    }
+    await deleteCustomModel({ providerAlias, id, type });
+    return NextResponse.json({ success: true });
+  } catch (error) {
+    console.log("Error deleting custom model:", error);
+    return NextResponse.json({ error: "Failed to delete custom model" }, { status: 500 });
+  }
+}
diff --git a/src/app/api/v1/images/generations/route.js b/src/app/api/v1/images/generations/route.js
new file mode 100644
index 00000000..302a718b
--- /dev/null
+++ b/src/app/api/v1/images/generations/route.js
@@ -0,0 +1,16 @@
+import { handleImageGeneration } from "@/sse/handlers/imageGeneration.js";
+
+export async function OPTIONS() {
+  return new Response(null, {
+    headers: {
+      "Access-Control-Allow-Origin": "*",
+      "Access-Control-Allow-Methods": "POST, OPTIONS",
+      "Access-Control-Allow-Headers": "*",
+    },
+  });
+}
+
+/** POST /v1/images/generations - OpenAI-compatible image generation endpoint */
+export async function POST(request) {
+  return await handleImageGeneration(request);
+}
diff --git a/src/lib/localDb.js b/src/lib/localDb.js
index ced9b3c8..f2c2cafd 100644
--- a/src/lib/localDb.js
+++ b/src/lib/localDb.js
@@ -44,6 +44,7 @@ function cloneDefaultData() {
     providerNodes: [],
     proxyPools: [],
     modelAliases: {},
+    customModels: [],
     mitmAlias: {},
     combos: [],
     apiKeys: [],
@@ -515,6 +516,33 @@ export async function deleteModelAlias(alias) {
   await safeWrite(db);
 }

+// Custom models — user-added models with explicit type (llm/image/tts/embedding/...)
+export async function getCustomModels() {
+  const db = await getDb();
+  return db.data.customModels || [];
+}
+
+export async function addCustomModel({ providerAlias, id, type = "llm", name }) {
+  const db = await getDb();
+  if (!db.data.customModels) db.data.customModels = [];
+  const exists = db.data.customModels.some(
+    (m) => m.providerAlias === providerAlias && m.id === id && (m.type || "llm") === type
+  );
+  if (exists) return false;
+  db.data.customModels.push({ providerAlias, id, type, name: name || id });
+  await safeWrite(db);
+  return true;
+}
+
+export async function deleteCustomModel({ providerAlias, id, type = "llm" }) {
+  const db = await getDb();
+  if (!db.data.customModels) return;
+  db.data.customModels = db.data.customModels.filter(
+    (m) => !(m.providerAlias === providerAlias && m.id === id && (m.type || "llm") === type)
+  );
+  await safeWrite(db);
+}
+
 export async function getMitmAlias(toolName) {
   const db = await getDb();
   const all = db.data.mitmAlias || {};
diff --git a/src/models/index.js b/src/models/index.js
index e61129fe..99444f62 100644
--- a/src/models/index.js
+++ b/src/models/index.js
@@ -25,6 +25,9 @@ export {
   getModelAliases,
   setModelAlias,
   deleteModelAlias,
+  getCustomModels,
+  addCustomModel,
+  deleteCustomModel,
   getMitmAlias,
   setMitmAliasAll,
   getApiKeys,
diff --git a/src/shared/components/Sidebar.js b/src/shared/components/Sidebar.js
index 50ab12ed..eee07906 100644
--- a/src/shared/components/Sidebar.js
+++ b/src/shared/components/Sidebar.js
@@ -12,7 +12,7 @@ import Button from "./Button";
 import { ConfirmModal } from "./Modal";

 // const VISIBLE_MEDIA_KINDS = ["embedding", "image", "imageToText", "tts", "stt", "webSearch", "webFetch", "video", "music"];
-const VISIBLE_MEDIA_KINDS = ["embedding", "tts"];
+const VISIBLE_MEDIA_KINDS = ["embedding", "image", "tts"];

 const navItems = [
   { href: "/dashboard/endpoint", label: "Endpoint", icon: "api" },
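Hypothetical calls against the custom-models API added above, mirroring what ModelsCard does; the base URL assumes a local dev server and the alias/model values are placeholders:

```js
const base = "http://localhost:3000/api/models/custom";

// Add a custom image model under a provider alias (type defaults to "llm" server-side)
await fetch(base, {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({ providerAlias: "openai", id: "my-finetune", type: "image" }),
});

// List all custom models
const { models } = await (await fetch(base)).json();
console.log(models);

// Delete by the same (providerAlias, id, type) triple the store dedupes on
const params = new URLSearchParams({ providerAlias: "openai", id: "my-finetune", type: "image" });
await fetch(`${base}?${params}`, { method: "DELETE" });
```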
diff --git a/src/shared/constants/providers.js b/src/shared/constants/providers.js
index 6fc007cb..a024080e 100644
--- a/src/shared/constants/providers.js
+++ b/src/shared/constants/providers.js
@@ -9,7 +9,7 @@ export const FREE_PROVIDERS = {
   // codebuddy: { id: "codebuddy", alias: "cb", name: "CodeBuddy", icon: "smart_toy", color: "#006EFF" },
   // qoder: { id: "qoder", alias: "qd", name: "Qoder AI", icon: "water_drop", color: "#EC4899" },
   iflow: { id: "iflow", alias: "if", name: "iFlow AI", icon: "water_drop", color: "#6366F1" },
-  opencode: { id: "opencode", alias: "oc", name: "OpenCode", icon: "terminal", color: "#E87040", textIcon: "OC", noAuth: true, passthroughModels: true, modelsFetcher: { url: "https://opencode.ai/zen/v1/models", type: "opencode-free" } },
+  opencode: { id: "opencode", alias: "oc", name: "OpenCode Free", icon: "terminal", color: "#E87040", textIcon: "OC", noAuth: true, passthroughModels: true, modelsFetcher: { url: "https://opencode.ai/zen/v1/models", type: "opencode-free" } },
 };

 // Free Tier Providers (has free access but may require account/API key)
@@ -61,6 +61,7 @@ export const APIKEY_PROVIDERS = {
   "alicode-intl": { id: "alicode-intl", alias: "alicode-intl", name: "Alibaba Intl", icon: "cloud", color: "#FF6A00", textIcon: "ALi" },
   openai: { id: "openai", alias: "openai", name: "OpenAI", icon: "auto_awesome", color: "#10A37F", textIcon: "OA", website: "https://platform.openai.com", serviceKinds: ["llm", "embedding", "tts", "image", "imageToText", "webSearch"], thinkingConfig: THINKING_CONFIG.effort },
   anthropic: { id: "anthropic", alias: "anthropic", name: "Anthropic", icon: "smart_toy", color: "#D97757", textIcon: "AN", website: "https://console.anthropic.com", serviceKinds: ["llm", "imageToText"] },
+  "opencode-go": { id: "opencode-go", alias: "ocg", name: "OpenCode Go", icon: "terminal", color: "#E87040", textIcon: "OC", website: "https://opencode.ai/auth", notice: { text: "OpenCode Go subscription: $5/mo (then $10/mo). Access to Kimi, GLM, Qwen, MiMo, MiniMax models.", apiKeyUrl: "https://opencode.ai/auth" } },
   deepseek: { id: "deepseek", alias: "ds", name: "DeepSeek", icon: "bolt", color: "#4D6BFE", textIcon: "DS", website: "https://deepseek.com" },
diff --git a/src/sse/handlers/imageGeneration.js b/src/sse/handlers/imageGeneration.js
new file mode 100644
index 00000000..47911958
--- /dev/null
+++ b/src/sse/handlers/imageGeneration.js
@@ -0,0 +1,152 @@
+import {
+  getProviderCredentials,
+  markAccountUnavailable,
+  clearAccountError,
+  extractApiKey,
+  isValidApiKey,
+} from "../services/auth.js";
+import { getSettings } from "@/lib/localDb";
+import { getModelInfo } from "../services/model.js";
+import { handleImageGenerationCore } from "open-sse/handlers/imageGenerationCore.js";
+import { errorResponse, unavailableResponse } from "open-sse/utils/error.js";
+import { HTTP_STATUS } from "open-sse/config/runtimeConfig.js";
+import * as log from "../utils/logger.js";
+import { updateProviderCredentials, checkAndRefreshToken } from "../services/tokenRefresh.js";
+
+// Providers that don't require credentials (noAuth)
+const NO_AUTH_PROVIDERS = new Set(["sdwebui", "comfyui"]);
+
+/**
+ * Handle image generation request
+ * @param {Request} request
+ */
+export async function handleImageGeneration(request) {
+  let body;
+  try {
+    body = await request.json();
+  } catch {
+    log.warn("IMAGE", "Invalid JSON body");
+    return errorResponse(HTTP_STATUS.BAD_REQUEST, "Invalid JSON body");
+  }
+
+  const url = new URL(request.url);
+  const modelStr = body.model;
+
+  log.request("POST", `${url.pathname} | ${modelStr}`);
+
+  const apiKey = extractApiKey(request);
+  if (apiKey) {
+    log.debug("AUTH", `API Key: ${log.maskKey(apiKey)}`);
+  } else {
+    log.debug("AUTH", "No API key provided (local mode)");
+  }
+
+  const settings = await getSettings();
+  if (settings.requireApiKey) {
+    if (!apiKey) {
+      log.warn("AUTH", "Missing API key (requireApiKey=true)");
+      return errorResponse(HTTP_STATUS.UNAUTHORIZED, "Missing API key");
+    }
+    const valid = await isValidApiKey(apiKey);
+    if (!valid) {
+      log.warn("AUTH", "Invalid API key (requireApiKey=true)");
+      return errorResponse(HTTP_STATUS.UNAUTHORIZED, "Invalid API key");
+    }
+  }
+
+  if (!modelStr) {
+    log.warn("IMAGE", "Missing model");
+    return errorResponse(HTTP_STATUS.BAD_REQUEST, "Missing model");
+  }
+
+  if (!body.prompt) {
+    log.warn("IMAGE", "Missing prompt");
+    return errorResponse(HTTP_STATUS.BAD_REQUEST, "Missing required field: prompt");
+  }
+
+  const modelInfo = await getModelInfo(modelStr);
+  if (!modelInfo.provider) {
+    log.warn("IMAGE", "Invalid model format", { model: modelStr });
+    return errorResponse(HTTP_STATUS.BAD_REQUEST, "Invalid model format");
+  }
+
+  const { provider, model } = modelInfo;
+
+  if (modelStr !== `${provider}/${model}`) {
+    log.info("ROUTING", `${modelStr} → ${provider}/${model}`);
+  } else {
+    log.info("ROUTING", `Provider: ${provider}, Model: ${model}`);
+  }
+
+  // noAuth providers — no credential needed
+  if (NO_AUTH_PROVIDERS.has(provider)) {
+    const result = await handleImageGenerationCore({
+      body,
+      modelInfo: { provider, model },
+      credentials: null,
+      log,
+    });
+    if (result.success) return result.response;
+    return errorResponse(result.status || HTTP_STATUS.BAD_GATEWAY, result.error || "Image generation failed");
+  }
+
+  // Credentialed providers — fallback loop
+  const excludeConnectionIds = new Set();
+  let lastError = null;
+  let lastStatus = null;
+
+  while (true) {
+    const credentials = await getProviderCredentials(provider, excludeConnectionIds, model);
+
+    if (!credentials || credentials.allRateLimited) {
+      if (credentials?.allRateLimited) {
+        const errorMsg = lastError || credentials.lastError || "Unavailable";
+        const status = lastStatus || Number(credentials.lastErrorCode) || HTTP_STATUS.SERVICE_UNAVAILABLE;
+        log.warn("IMAGE", `[${provider}/${model}] ${errorMsg} (${credentials.retryAfterHuman})`);
+        return unavailableResponse(status, `[${provider}/${model}] ${errorMsg}`, credentials.retryAfter, credentials.retryAfterHuman);
+      }
+      if (excludeConnectionIds.size === 0) {
+        log.error("AUTH", `No credentials for provider: ${provider}`);
+        return errorResponse(HTTP_STATUS.BAD_REQUEST, `No credentials for provider: ${provider}`);
+      }
+      log.warn("IMAGE", "No more accounts available", { provider });
+      return errorResponse(lastStatus || HTTP_STATUS.SERVICE_UNAVAILABLE, lastError || "All accounts unavailable");
+    }
+
+    log.info("AUTH", `\x1b[32mUsing ${provider} account: ${credentials.connectionName}\x1b[0m`);
+
+    const refreshedCredentials = await checkAndRefreshToken(provider, credentials);
+
+    const result = await handleImageGenerationCore({
+      body,
+      modelInfo: { provider, model },
+      credentials: refreshedCredentials,
+      log,
+      onCredentialsRefreshed: async (newCreds) => {
+        await updateProviderCredentials(credentials.connectionId, {
+          accessToken: newCreds.accessToken,
+          refreshToken: newCreds.refreshToken,
+          providerSpecificData: newCreds.providerSpecificData,
+          testStatus: "active"
+        });
+      },
+      onRequestSuccess: async () => {
+        await clearAccountError(credentials.connectionId, credentials, model);
+      }
+    });
+
+    if (result.success) return result.response;
+
+    const { shouldFallback } = await markAccountUnavailable(credentials.connectionId, result.status, result.error, provider, model);
+
+    if (shouldFallback) {
+      log.warn("AUTH", `Account ${credentials.connectionName} unavailable (${result.status}), trying fallback`);
+      excludeConnectionIds.add(credentials.connectionId);
+      lastError = result.error;
+      lastStatus = result.status;
+      continue;
+    }
+
+    return result.response;
+  }
+}
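A hedged end-to-end sketch against the handler above using a noAuth provider, which skips the account/fallback loop entirely. It assumes SD WebUI on :7860, the dashboard on :3000, and requireApiKey disabled:

```js
const res = await fetch("http://localhost:3000/api/v1/images/generations", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    model: "sdwebui/sdxl-base-1.0", // "<provider>/<model>" - resolved by getModelInfo
    prompt: "isometric pixel-art castle",
    size: "768x768", // mapped to width/height in the txt2img payload
    n: 1,            // mapped to batch_size
  }),
});
console.log(await res.json()); // normalized: { created, data: [{ b64_json }, ...] }
```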
diff --git a/tests/unit/image-generation.test.js b/tests/unit/image-generation.test.js
new file mode 100644
index 00000000..fe40ac03
--- /dev/null
+++ b/tests/unit/image-generation.test.js
@@ -0,0 +1,320 @@
+/**
+ * Unit tests for the image generation handler
+ *
+ * Covers:
+ * - OpenAI-compatible format (openai, minimax, openrouter)
+ * - Gemini format (generateContent API)
+ * - Provider-specific formats (nanobanana, sdwebui)
+ * - Response normalization to OpenAI format
+ * - Error handling (missing prompt, invalid model)
+ */
+
+import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
+import { handleImageGenerationCore } from "../../open-sse/handlers/imageGenerationCore.js";
+
+const originalFetch = global.fetch;
+
+describe("handleImageGenerationCore", () => {
+  beforeEach(() => {
+    global.fetch = vi.fn();
+  });
+
+  afterEach(() => {
+    global.fetch = originalFetch;
+  });
+
+  it("validates required prompt field", async () => {
+    const result = await handleImageGenerationCore({
+      body: { model: "openai/dall-e-3" },
+      modelInfo: { provider: "openai", model: "dall-e-3" },
+      credentials: { apiKey: "test-key" },
+      log: null,
+    });
+
+    expect(result.success).toBe(false);
+    expect(result.status).toBe(400);
+    expect(result.error).toContain("Missing required field: prompt");
+  });
+
+  it("rejects unsupported provider", async () => {
+    const result = await handleImageGenerationCore({
+      body: { prompt: "test" },
+      modelInfo: { provider: "unknown-provider", model: "test" },
+      credentials: null,
+      log: null,
+    });
+
+    expect(result.success).toBe(false);
+    expect(result.status).toBe(400);
+    expect(result.error).toContain("does not support image generation");
+  });
+
+  it("generates image with OpenAI format", async () => {
+    global.fetch.mockResolvedValueOnce(
+      new Response(
+        JSON.stringify({
+          created: 1234567890,
+          data: [{ url: "https://example.com/image.png" }],
+        }),
+        { status: 200, headers: { "Content-Type": "application/json" } }
+      )
+    );
+
+    const result = await handleImageGenerationCore({
+      body: { prompt: "A cute cat", n: 1, size: "1024x1024" },
+      modelInfo: { provider: "openai", model: "dall-e-3" },
+      credentials: { apiKey: "test-key" },
+      log: null,
+    });
+
+    expect(result.success).toBe(true);
+    expect(global.fetch).toHaveBeenCalledWith(
+      "https://api.openai.com/v1/images/generations",
+      expect.objectContaining({
+        method: "POST",
+        headers: expect.objectContaining({
+          "Content-Type": "application/json",
+          Authorization: "Bearer test-key",
+        }),
+        body: expect.stringContaining('"prompt":"A cute cat"'),
+      })
+    );
+
+    const responseBody = await result.response.json();
+    expect(responseBody.data).toHaveLength(1);
+    expect(responseBody.data[0].url).toBe("https://example.com/image.png");
+  });
+
+  it("generates image with Gemini format", async () => {
+    global.fetch.mockResolvedValueOnce(
+      new Response(
+        JSON.stringify({
+          candidates: [
+            {
+              content: {
+                parts: [
+                  { text: "Generated image" },
+                  { inlineData: { data: "base64imagedata" } },
+                ],
+              },
+            },
+          ],
+        }),
+        { status: 200, headers: { "Content-Type": "application/json" } }
+      )
+    );
+
+    const result = await handleImageGenerationCore({
+      body: { prompt: "A sunset" },
+      modelInfo: { provider: "gemini", model: "gemini-image-preview" },
+      credentials: { apiKey: "test-key" },
+      log: null,
+    });
+
+    expect(result.success).toBe(true);
+    expect(global.fetch).toHaveBeenCalledWith(
+      expect.stringContaining("generativelanguage.googleapis.com"),
+      expect.objectContaining({
+        method: "POST",
+        body: expect.stringContaining('"responseModalities":["TEXT","IMAGE"]'),
+      })
+    );
+
+    const responseBody = await result.response.json();
+    expect(responseBody.data).toHaveLength(1);
+    expect(responseBody.data[0].b64_json).toBe("base64imagedata");
+  });
+
+  it("generates image with Minimax format", async () => {
+    global.fetch.mockResolvedValueOnce(
+      new Response(
+        JSON.stringify({
+          created: 1234567890,
+          data: [{ url: "https://example.com/minimax.png" }],
+        }),
+        { status: 200, headers: { "Content-Type": "application/json" } }
+      )
+    );
+
+    const result = await handleImageGenerationCore({
+      body: { prompt: "A mountain", size: "1024x1024" },
+      modelInfo: { provider: "minimax", model: "minimax-image-01" },
+      credentials: { apiKey: "test-key" },
+      log: null,
+    });
+
+    expect(result.success).toBe(true);
+    expect(global.fetch).toHaveBeenCalledWith(
+      "https://api.minimaxi.com/v1/images/generations",
+      expect.objectContaining({
+        method: "POST",
+        headers: expect.objectContaining({
+          Authorization: "Bearer test-key",
+        }),
+      })
+    );
+  });
+
+  it("generates image with NanoBanana format", async () => {
+    global.fetch.mockResolvedValueOnce(
+      new Response(
+        JSON.stringify({ image: "base64nanobanana" }),
+        { status: 200, headers: { "Content-Type": "application/json" } }
+      )
+    );
+
+    const result = await handleImageGenerationCore({
+      body: { prompt: "A robot", n: 2, size: "1024x1792" },
+      modelInfo: { provider: "nanobanana", model: "nanobanana-flash" },
+      credentials: { apiKey: "test-key" },
+      log: null,
+    });
+
+    expect(result.success).toBe(true);
+    const fetchCall = global.fetch.mock.calls[0];
+    const requestBody = JSON.parse(fetchCall[1].body);
+    expect(requestBody.type).toBe("TEXTTOIAMGE");
+    expect(requestBody.numImages).toBe(2);
+    expect(requestBody.image_size).toBe("9:16");
+
+    const responseBody = await result.response.json();
+    expect(responseBody.data[0].b64_json).toBe("base64nanobanana");
+  });
+
+  it("generates image with SD WebUI format", async () => {
+    global.fetch.mockResolvedValueOnce(
+      new Response(
+        JSON.stringify({ images: ["base64sdwebui1", "base64sdwebui2"] }),
+        { status: 200, headers: { "Content-Type": "application/json" } }
+      )
+    );
+
+    const result = await handleImageGenerationCore({
+      body: { prompt: "A forest", size: "768x768", n: 2 },
+      modelInfo: { provider: "sdwebui", model: "sdxl-base-1.0" },
+      credentials: null,
+      log: null,
+    });
+
+    expect(result.success).toBe(true);
+    const fetchCall = global.fetch.mock.calls[0];
+    const requestBody = JSON.parse(fetchCall[1].body);
+    expect(requestBody.width).toBe(768);
+    expect(requestBody.height).toBe(768);
+    expect(requestBody.batch_size).toBe(2);
+
+    const responseBody = await result.response.json();
+    expect(responseBody.data).toHaveLength(2);
+  });
+
+  it("handles OpenRouter with HTTP-Referer header", async () => {
+    global.fetch.mockResolvedValueOnce(
+      new Response(
+        JSON.stringify({
+          created: 1234567890,
+          data: [{ url: "https://example.com/or.png" }],
+        }),
+        { status: 200, headers: { "Content-Type": "application/json" } }
+      )
+    );
+
+    const result = await handleImageGenerationCore({
+      body: { prompt: "A city" },
+      modelInfo: { provider: "openrouter", model: "openai/dall-e-3" },
+      credentials: { apiKey: "test-key" },
+      log: null,
+    });
+
+    expect(result.success).toBe(true);
+    expect(global.fetch).toHaveBeenCalledWith(
+      "https://openrouter.ai/api/v1/images/generations",
+      expect.objectContaining({
+        headers: expect.objectContaining({
+          "HTTP-Referer": "https://endpoint-proxy.local",
+          "X-Title": "Endpoint Proxy",
+        }),
+      })
+    );
+  });
+
+  it("handles HuggingFace binary response", async () => {
+    const imageBuffer = new Uint8Array([0x89, 0x50, 0x4e, 0x47]); // PNG header
+    global.fetch.mockResolvedValueOnce(
+      new Response(imageBuffer, {
+        status: 200,
+        headers: { "Content-Type": "image/png" },
+      })
+    );
+
+    const result = await handleImageGenerationCore({
+      body: { prompt: "A tree" },
+      modelInfo: { provider: "huggingface", model: "black-forest-labs/FLUX.1-schnell" },
+      credentials: { apiKey: "test-key" },
+      log: null,
+    });
+
+    expect(result.success).toBe(true);
+    const responseBody = await result.response.json();
+    expect(responseBody.data[0].b64_json).toBeTruthy();
+  });
+
+  it("handles provider error responses", async () => {
+    global.fetch.mockResolvedValueOnce(
+      new Response(
+        JSON.stringify({ error: { message: "Rate limit exceeded" } }),
+        { status: 429, headers: { "Content-Type": "application/json" } }
+      )
+    );
+
+    const result = await handleImageGenerationCore({
+      body: { prompt: "test" },
+      modelInfo: { provider: "openai", model: "dall-e-3" },
+      credentials: { apiKey: "test-key" },
+      log: null,
+    });
+
+    expect(result.success).toBe(false);
+    expect(result.status).toBe(429);
+    expect(result.error).toContain("Rate limit exceeded");
+  });
+
+  it("handles network errors", async () => {
+    global.fetch.mockRejectedValueOnce(new Error("Network timeout"));
+
+    const result = await handleImageGenerationCore({
+      body: { prompt: "test" },
+      modelInfo: { provider: "openai", model: "dall-e-3" },
+      credentials: { apiKey: "test-key" },
+      log: null,
+    });
+
+    expect(result.success).toBe(false);
+    expect(result.status).toBe(502);
+    expect(result.error).toContain("Network timeout");
+  });
+
+  it("calls onRequestSuccess callback on success", async () => {
+    global.fetch.mockResolvedValueOnce(
+      new Response(
+        JSON.stringify({
+          created: 1234567890,
+          data: [{ url: "https://example.com/success.png" }],
+        }),
+        { status: 200, headers: { "Content-Type": "application/json" } }
+      )
+    );
+
+    const onRequestSuccess = vi.fn();
+
+    const result = await handleImageGenerationCore({
+      body: { prompt: "test" },
+      modelInfo: { provider: "openai", model: "dall-e-3" },
+      credentials: { apiKey: "test-key" },
+      log: null,
+      onRequestSuccess,
+    });
+
+    expect(result.success).toBe(true);
+    expect(onRequestSuccess).toHaveBeenCalledTimes(1);
+  });
+});
diff --git a/tests/vitest.config.js b/tests/vitest.config.js
index e5cd9ae8..0df209bf 100644
--- a/tests/vitest.config.js
+++ b/tests/vitest.config.js
@@ -16,6 +16,8 @@ export default defineConfig({
     alias: {
       // Resolve open-sse/* imports to the actual local package
       "open-sse": resolve(__dirname, "../open-sse"),
+      // Resolve @/* imports to src directory
+      "@": resolve(__dirname, "../src"),
     },
   },
 });