mirror of
https://github.com/decolua/9router.git
synced 2026-05-08 12:01:28 +00:00
feat: add OpenCode Go provider and support for custom models
- Introduced the OpenCode Go provider with its endpoint and header configuration.
- Enhanced model management by allowing users to add and delete custom models.
- Updated UI components to support model selection for image model types.
- Adjusted sidebar visibility to include image media kinds.
This commit is contained in:
@@ -141,6 +141,18 @@ export const PROVIDER_MODELS = {
|
||||
{ id: "deepseek/deepseek-chat", name: "DeepSeek Chat" },
|
||||
{ id: "deepseek/deepseek-reasoner", name: "DeepSeek Reasoner" },
|
||||
],
|
||||
"opencode-go": [ // OpenCode Go subscription (API key)
|
||||
{ id: "kimi-k2.6", name: "Kimi K2.6" },
|
||||
{ id: "kimi-k2.5", name: "Kimi K2.5" },
|
||||
{ id: "glm-5.1", name: "GLM 5.1" },
|
||||
{ id: "glm-5", name: "GLM 5" },
|
||||
{ id: "qwen3.5-plus", name: "Qwen 3.5 Plus" },
|
||||
{ id: "qwen3.6-plus", name: "Qwen 3.6 Plus" },
|
||||
{ id: "mimo-v2-pro", name: "MiMo V2 Pro" },
|
||||
{ id: "mimo-v2-omni", name: "MiMo V2 Omni" },
|
||||
{ id: "minimax-m2.7", name: "MiniMax M2.7", targetFormat: "claude" },
|
||||
{ id: "minimax-m2.5", name: "MiniMax M2.5", targetFormat: "claude" },
|
||||
],
|
||||
oc: [ // OpenCode
|
||||
// { id: "nemotron-3-super-free", name: "Nemotron 3 Super" },
|
||||
// { id: "qwen3.6-plus-free", name: "Qwen 3.6 Plus" },
|
||||
@@ -192,6 +204,10 @@ export const PROVIDER_MODELS = {
|
||||
{ id: "tts-1", name: "TTS-1", type: "tts" },
|
||||
{ id: "tts-1-hd", name: "TTS-1 HD", type: "tts" },
|
||||
{ id: "gpt-4o-mini-tts", name: "GPT-4o Mini TTS", type: "tts" },
|
||||
// Image models
|
||||
{ id: "gpt-image-1", name: "GPT Image 1", type: "image" },
|
||||
{ id: "dall-e-3", name: "DALL-E 3", type: "image" },
|
||||
{ id: "dall-e-2", name: "DALL-E 2", type: "image" },
|
||||
],
|
||||
anthropic: [
|
||||
{ id: "claude-sonnet-4-20250514", name: "Claude Sonnet 4" },
|
||||
@@ -219,6 +235,10 @@ export const PROVIDER_MODELS = {
|
||||
{ id: "gemini-embedding-001", name: "Gemini Embedding 001", type: "embedding" },
|
||||
{ id: "text-embedding-005", name: "Text Embedding 005", type: "embedding" },
|
||||
{ id: "text-embedding-004", name: "Text Embedding 004 (Legacy)", type: "embedding" },
|
||||
// Image models (Nano Banana)
|
||||
{ id: "gemini-3.1-flash-image-preview", name: "Gemini 3.1 Flash Image (Nano Banana 2)", type: "image" },
|
||||
{ id: "gemini-3-pro-image-preview", name: "Gemini 3 Pro Image (Nano Banana Pro)", type: "image" },
|
||||
{ id: "gemini-2.5-flash-image", name: "Gemini 2.5 Flash Image (Nano Banana)", type: "image" },
|
||||
],
|
||||
openrouter: [
|
||||
// Embedding models
|
||||
@@ -233,6 +253,11 @@ export const PROVIDER_MODELS = {
|
||||
{ id: "openai/gpt-4o-mini-tts", name: "GPT-4o Mini TTS", type: "tts" },
|
||||
{ id: "openai/tts-1-hd", name: "TTS-1 HD", type: "tts" },
|
||||
{ id: "openai/tts-1", name: "TTS-1", type: "tts" },
|
||||
// Image models
|
||||
{ id: "openai/dall-e-3", name: "DALL-E 3 (via OpenRouter)", type: "image" },
|
||||
{ id: "openai/gpt-image-1", name: "GPT Image 1 (via OpenRouter)", type: "image" },
|
||||
{ id: "google/imagen-3.0-generate-002", name: "Imagen 3 (via OpenRouter)", type: "image" },
|
||||
{ id: "black-forest-labs/FLUX.1-schnell", name: "FLUX.1 Schnell (via OpenRouter)", type: "image" },
|
||||
],
|
||||
glm: [
|
||||
{ id: "glm-5.1", name: "GLM 5.1" },
|
||||
@@ -256,6 +281,8 @@ export const PROVIDER_MODELS = {
|
||||
{ id: "MiniMax-M2.7", name: "MiniMax M2.7" },
|
||||
{ id: "MiniMax-M2.5", name: "MiniMax M2.5" },
|
||||
{ id: "MiniMax-M2.1", name: "MiniMax M2.1" },
|
||||
// Image models
|
||||
{ id: "minimax-image-01", name: "MiniMax Image 01", type: "image" },
|
||||
],
|
||||
blackbox: [
|
||||
{ id: "gpt-4o", name: "GPT-4o" },
|
||||
@@ -424,6 +451,24 @@ export const PROVIDER_MODELS = {
|
||||
|
||||
// TTS entries are loaded from ttsModels.js via buildTtsProviderModels()
|
||||
...buildTtsProviderModels(),
|
||||
|
||||
// Image providers
|
||||
nanobanana: [
|
||||
{ id: "nanobanana-flash", name: "NanoBanana Flash", type: "image" },
|
||||
{ id: "nanobanana-pro", name: "NanoBanana Pro", type: "image" },
|
||||
],
|
||||
sdwebui: [
|
||||
{ id: "stable-diffusion-v1-5", name: "Stable Diffusion v1.5", type: "image" },
|
||||
{ id: "sdxl-base-1.0", name: "SDXL Base 1.0", type: "image" },
|
||||
],
|
||||
comfyui: [
|
||||
{ id: "flux-dev", name: "FLUX Dev", type: "image" },
|
||||
{ id: "sdxl", name: "SDXL", type: "image" },
|
||||
],
|
||||
huggingface: [
|
||||
{ id: "black-forest-labs/FLUX.1-schnell", name: "FLUX.1 Schnell", type: "image" },
|
||||
{ id: "stabilityai/stable-diffusion-xl-base-1.0", name: "SDXL Base 1.0", type: "image" },
|
||||
],
|
||||
};
|
||||
|
||||
// Helper functions
|
||||
|
||||
@@ -337,6 +337,11 @@ export const PROVIDERS = {
|
||||
headers: { "x-opencode-client": "desktop" },
|
||||
noAuth: true
|
||||
},
|
||||
"opencode-go": {
|
||||
baseUrl: "https://opencode.ai/zen/go/v1/chat/completions",
|
||||
format: "openai",
|
||||
headers: {}
|
||||
},
|
||||
"grok-web": {
|
||||
baseUrl: "https://grok.com/rest/app-chat/conversations/new",
|
||||
format: "grok-web",
|
||||
|
||||
@@ -9,6 +9,7 @@ import { CursorExecutor } from "./cursor.js";
|
||||
import { VertexExecutor } from "./vertex.js";
|
||||
import { QwenExecutor } from "./qwen.js";
|
||||
import { OpenCodeExecutor } from "./opencode.js";
|
||||
import { OpenCodeGoExecutor } from "./opencode-go.js";
|
||||
import { GrokWebExecutor } from "./grok-web.js";
|
||||
import { PerplexityWebExecutor } from "./perplexity-web.js";
|
||||
import { DefaultExecutor } from "./default.js";
|
||||
@@ -27,6 +28,7 @@ const executors = {
|
||||
"vertex-partner": new VertexExecutor("vertex-partner"),
|
||||
qwen: new QwenExecutor(),
|
||||
opencode: new OpenCodeExecutor(),
|
||||
"opencode-go": new OpenCodeGoExecutor(),
|
||||
"grok-web": new GrokWebExecutor(),
|
||||
"perplexity-web": new PerplexityWebExecutor(),
|
||||
};
|
||||
@@ -56,5 +58,6 @@ export { VertexExecutor } from "./vertex.js";
|
||||
export { DefaultExecutor } from "./default.js";
|
||||
export { QwenExecutor } from "./qwen.js";
|
||||
export { OpenCodeExecutor } from "./opencode.js";
|
||||
export { OpenCodeGoExecutor } from "./opencode-go.js";
|
||||
export { GrokWebExecutor } from "./grok-web.js";
|
||||
export { PerplexityWebExecutor } from "./perplexity-web.js";
|
||||
|
||||
51
open-sse/executors/opencode-go.js
Normal file
51
open-sse/executors/opencode-go.js
Normal file
@@ -0,0 +1,51 @@
|
||||
import { BaseExecutor } from "./base.js";
|
||||
import { PROVIDERS } from "../config/providers.js";
|
||||
|
||||
// Models that must be routed to /zen/go/v1/messages — these speak the
// Anthropic/Claude wire format and authenticate with an x-api-key header
// instead of a Bearer token (see OpenCodeGoExecutor.buildHeaders).
const CLAUDE_FORMAT_MODELS = new Set(["minimax-m2.5", "minimax-m2.7"]);

// Root of the OpenCode Go ("zen") gateway; the per-model endpoint path is
// appended in OpenCodeGoExecutor.buildUrl.
const BASE = "https://opencode.ai/zen/go/v1";

// Kimi (Moonshot) requires reasoning_content on assistant tool_call messages when thinking is on.
// OpenAI-format clients don't send it -> upstream 400. Inject a non-empty placeholder.
const KIMI_REASONING_PLACEHOLDER = " ";
|
||||
|
||||
/**
 * Executor for the OpenCode Go gateway.
 *
 * Routes most models to the OpenAI-compatible /chat/completions endpoint;
 * models listed in CLAUDE_FORMAT_MODELS go to /messages with Anthropic-style
 * auth headers instead.
 */
export class OpenCodeGoExecutor extends BaseExecutor {
  constructor() {
    super("opencode-go", PROVIDERS["opencode-go"]);
  }

  /**
   * Resolve the upstream URL for a model.
   * BaseExecutor.execute calls buildUrl before buildHeaders, so the model is
   * cached on the instance for buildHeaders to consult.
   * NOTE(review): this is per-instance mutable state — confirm a shared
   * executor instance is never used for concurrent requests.
   */
  buildUrl(model) {
    this._lastModel = model;
    const path = CLAUDE_FORMAT_MODELS.has(model) ? "/messages" : "/chat/completions";
    return `${BASE}${path}`;
  }

  /**
   * Build auth headers for the model cached by buildUrl.
   * Claude-format models use x-api-key + anthropic-version; all others use a
   * Bearer token.
   */
  buildHeaders(credentials, stream = true) {
    const key = credentials?.apiKey || credentials?.accessToken;
    const isClaudeFormat = CLAUDE_FORMAT_MODELS.has(this._lastModel);

    const headers = isClaudeFormat
      ? {
          "Content-Type": "application/json",
          "x-api-key": key,
          "anthropic-version": "2023-06-01",
        }
      : {
          "Content-Type": "application/json",
          Authorization: `Bearer ${key}`,
        };

    if (stream) headers["Accept"] = "text/event-stream";
    return headers;
  }

  /**
   * Kimi upstream rejects assistant tool_call messages that lack
   * reasoning_content (400). Patch in a non-empty placeholder for kimi-*
   * models only; all other requests pass through untouched.
   */
  transformRequest(model, body) {
    if (!model?.startsWith?.("kimi-") || !body?.messages) return body;

    const patched = [];
    for (const msg of body.messages) {
      const needsPlaceholder =
        msg?.role === "assistant" &&
        Array.isArray(msg.tool_calls) &&
        !("reasoning_content" in msg);
      patched.push(
        needsPlaceholder ? { ...msg, reasoning_content: KIMI_REASONING_PLACEHOLDER } : msg
      );
    }
    return { ...body, messages: patched };
  }
}
|
||||
320
open-sse/handlers/imageGenerationCore.js
Normal file
320
open-sse/handlers/imageGenerationCore.js
Normal file
@@ -0,0 +1,320 @@
|
||||
import { createErrorResult, parseUpstreamError, formatProviderError } from "../utils/error.js";
|
||||
import { HTTP_STATUS } from "../config/runtimeConfig.js";
|
||||
import { refreshWithRetry } from "../services/tokenRefresh.js";
|
||||
import { getExecutor } from "../executors/index.js";
|
||||
|
||||
// Image provider configurations: upstream endpoint plus the request/response
// dialect ("format") consumed by buildImageUrl / buildImageBody /
// normalizeImageResponse below.
const IMAGE_PROVIDERS = {
  // OpenAI Images API (DALL-E / gpt-image-1)
  openai: {
    baseUrl: "https://api.openai.com/v1/images/generations",
    format: "openai",
  },
  // Google Generative Language API; model id and API key are appended to the
  // URL in buildImageUrl.
  gemini: {
    baseUrl: "https://generativelanguage.googleapis.com/v1beta/models",
    format: "gemini",
  },
  // MiniMax exposes an OpenAI-compatible images endpoint.
  minimax: {
    baseUrl: "https://api.minimaxi.com/v1/images/generations",
    format: "openai",
  },
  // OpenRouter proxies OpenAI-compatible image models; extra attribution
  // headers are added in buildImageHeaders.
  openrouter: {
    baseUrl: "https://openrouter.ai/api/v1/images/generations",
    format: "openai",
  },
  // NanoBanana uses its own request shape (see buildImageBody).
  nanobanana: {
    baseUrl: "https://api.nanobananaapi.ai/api/v1/nanobanana/generate",
    format: "nanobanana",
  },
  // Local Stable Diffusion WebUI (AUTOMATIC1111-style txt2img endpoint).
  sdwebui: {
    baseUrl: "http://localhost:7860/sdapi/v1/txt2img",
    format: "sdwebui",
  },
  // Local ComfyUI server root.
  comfyui: {
    baseUrl: "http://localhost:8188",
    format: "comfyui",
  },
  // HuggingFace Inference API; model id is appended to the URL and the
  // response is raw binary image data (handled in handleImageGenerationCore).
  huggingface: {
    baseUrl: "https://api-inference.huggingface.co/models",
    format: "huggingface",
  },
};
|
||||
|
||||
/**
 * Build the upstream image-generation URL for a provider/model pair.
 *
 * @param {string} provider - Key into IMAGE_PROVIDERS.
 * @param {string} model - Model id; embedded in the path for gemini and huggingface.
 * @param {object} credentials - Provider credentials; gemini needs apiKey or
 *   accessToken here because its key is passed as a query parameter.
 * @returns {string|null} Full request URL, or null when the provider is not an
 *   image provider (caller reports "does not support image generation").
 */
function buildImageUrl(provider, model, credentials) {
  const config = IMAGE_PROVIDERS[provider];
  if (!config) return null;

  switch (provider) {
    case "gemini": {
      // Fall back to "" on a missing key: without this the URL would contain
      // the literal string "undefined" and fail upstream with a confusing error.
      const apiKey = credentials?.apiKey || credentials?.accessToken || "";
      // Accept both "models/gemini-..." and bare "gemini-..." ids.
      const modelId = model.replace(/^models\//, "");
      return `${config.baseUrl}/${modelId}:generateContent?key=${encodeURIComponent(apiKey)}`;
    }
    case "huggingface":
      // HF Inference API routes by model id in the path.
      return `${config.baseUrl}/${model}`;
    default:
      return config.baseUrl;
  }
}
|
||||
|
||||
/**
 * Build HTTP headers for an image-generation request.
 *
 * Auth placement varies by provider:
 *  - gemini: no auth header (the API key travels in the URL, see buildImageUrl)
 *  - openrouter: Bearer token plus attribution headers (HTTP-Referer / X-Title)
 *  - everything else: Bearer token when a key is available
 *
 * @param {string} provider
 * @param {object} credentials - { apiKey?, accessToken? }
 * @returns {object} Header map. Authorization is omitted when no key is
 *   present, so we never send a literal "Bearer undefined" upstream.
 */
function buildImageHeaders(provider, credentials) {
  const headers = { "Content-Type": "application/json" };

  if (provider === "gemini") {
    return headers;
  }

  // Single key-resolution + guard for every Bearer-auth provider (the original
  // openrouter/huggingface branches could emit "Bearer undefined").
  const key = credentials?.apiKey || credentials?.accessToken;
  if (key) {
    headers["Authorization"] = `Bearer ${key}`;
  }

  if (provider === "openrouter") {
    headers["HTTP-Referer"] = "https://endpoint-proxy.local";
    headers["X-Title"] = "Endpoint Proxy";
  }

  return headers;
}
|
||||
|
||||
/**
 * Translate an OpenAI-style image request into the provider's native body.
 *
 * @param {string} provider - Key into IMAGE_PROVIDERS.
 * @param {string} model - Model id (only used by OpenAI-compatible providers).
 * @param {object} body - { prompt, n?, size?, quality?, style?, response_format? }
 * @returns {object} Provider-specific request body.
 */
function buildImageBody(provider, model, body) {
  const { prompt, n = 1, size = "1024x1024", quality, style, response_format } = body;

  if (provider === "gemini") {
    return {
      contents: [{ parts: [{ text: prompt }] }],
      generationConfig: {
        responseModalities: ["TEXT", "IMAGE"],
      },
    };
  }

  if (provider === "sdwebui") {
    // "WxH" -> numeric dimensions; fall back to 512 if either side is missing.
    const [width, height] = size.split("x").map(Number);
    return {
      prompt,
      width: width || 512,
      height: height || 512,
      steps: 20,
      batch_size: n,
    };
  }

  if (provider === "nanobanana") {
    // NanoBanana takes an aspect ratio rather than pixel dimensions.
    const aspectBySize = {
      "1024x1024": "1:1",
      "1024x1792": "9:16",
      "1792x1024": "16:9",
    };
    return {
      prompt,
      // NOTE(review): "TEXTTOIAMGE" looks like a typo for "TEXTTOIMAGE", but it
      // is preserved byte-for-byte — confirm against the NanoBanana API docs
      // before changing, as some APIs ship with typo'd constants.
      type: "TEXTTOIAMGE",
      numImages: n,
      image_size: aspectBySize[size] || "1:1",
    };
  }

  // OpenAI-compatible format: pass through, dropping unset optional fields.
  const requestBody = { model, prompt, n, size };
  if (quality) requestBody.quality = quality;
  if (style) requestBody.style = style;
  if (response_format) requestBody.response_format = response_format;
  return requestBody;
}
|
||||
|
||||
/**
 * Convert a provider-specific image response into the OpenAI images shape:
 * { created: <unix seconds>, data: [{ b64_json, revised_prompt? }, ...] }.
 * Responses that already match that shape pass through untouched.
 *
 * @param {object} responseBody - Parsed upstream response.
 * @param {string} provider - Provider key, selects the normalization rules.
 * @param {string} prompt - Original prompt, echoed as revised_prompt where needed.
 */
function normalizeImageResponse(responseBody, provider, prompt) {
  // Already OpenAI-formatted: return as-is.
  if (responseBody.created && Array.isArray(responseBody.data)) {
    return responseBody;
  }

  const createdAt = Math.floor(Date.now() / 1000);

  if (provider === "gemini") {
    const parts = responseBody.candidates?.[0]?.content?.parts || [];
    const images = [];
    for (const part of parts) {
      if (part.inlineData?.data) {
        images.push({ b64_json: part.inlineData.data });
      }
    }
    // Fall back to an empty image + echoed prompt so callers always get one entry.
    const data = images.length > 0 ? images : [{ b64_json: "", revised_prompt: prompt }];
    return { created: createdAt, data };
  }

  if (provider === "sdwebui") {
    const raw = Array.isArray(responseBody.images) ? responseBody.images : [];
    return { created: createdAt, data: raw.map((img) => ({ b64_json: img })) };
  }

  if (provider === "nanobanana") {
    const data = responseBody.image
      ? [{ b64_json: responseBody.image, revised_prompt: prompt }]
      : [];
    return { created: createdAt, data };
  }

  // huggingface responses are normalized at read time (binary -> b64_json in
  // handleImageGenerationCore); anything else passes through unchanged.
  return responseBody;
}
|
||||
|
||||
/**
 * Core image generation handler: builds the provider-specific request, sends
 * it, retries once after a credential refresh on 401/403, and normalizes the
 * response to the OpenAI images format.
 *
 * @param {object} options
 * @param {object} options.body - Request body { model, prompt, n, size, ... }
 * @param {object} options.modelInfo - { provider, model }
 * @param {object} options.credentials - Provider credentials; NOTE: mutated
 *   in place (Object.assign) when a refresh succeeds, so the caller sees the
 *   new tokens.
 * @param {object} [options.log] - Logger with optional debug/info/warn methods
 * @param {function} [options.onCredentialsRefreshed] - Called when creds are refreshed
 * @param {function} [options.onRequestSuccess] - Called on success, before normalization
 * @returns {Promise<{ success: boolean, response: Response, status?: number, error?: string }>}
 */
export async function handleImageGenerationCore({
  body,
  modelInfo,
  credentials,
  log,
  onCredentialsRefreshed,
  onRequestSuccess,
}) {
  const { provider, model } = modelInfo;

  if (!body.prompt) {
    return createErrorResult(HTTP_STATUS.BAD_REQUEST, "Missing required field: prompt");
  }

  // null URL means the provider has no entry in IMAGE_PROVIDERS.
  const url = buildImageUrl(provider, model, credentials);
  if (!url) {
    return createErrorResult(
      HTTP_STATUS.BAD_REQUEST,
      `Provider '${provider}' does not support image generation`
    );
  }

  const headers = buildImageHeaders(provider, credentials);
  const requestBody = buildImageBody(provider, model, body);

  log?.debug?.("IMAGE", `${provider.toUpperCase()} | ${model} | prompt="${body.prompt.slice(0, 50)}..."`);

  let providerResponse;
  try {
    providerResponse = await fetch(url, {
      method: "POST",
      headers,
      body: JSON.stringify(requestBody),
    });
  } catch (error) {
    // Network-level failure (DNS, connection refused, etc.) -> 502.
    const errMsg = formatProviderError(error, provider, model, HTTP_STATUS.BAD_GATEWAY);
    log?.debug?.("IMAGE", `Fetch error: ${errMsg}`);
    return createErrorResult(HTTP_STATUS.BAD_GATEWAY, errMsg);
  }

  // Handle 401/403 — try token refresh
  // NOTE(review): getExecutor(provider) may return undefined for image-only
  // providers (e.g. sdwebui/comfyui have no executor in the visible registry);
  // `!executor?.noAuth` is then true and executor.refreshCredentials would
  // throw inside the refresh callback — confirm refreshWithRetry tolerates a
  // throwing callback, or guard on executor?.refreshCredentials.
  const executor = getExecutor(provider);
  if (
    !executor?.noAuth &&
    (providerResponse.status === HTTP_STATUS.UNAUTHORIZED ||
      providerResponse.status === HTTP_STATUS.FORBIDDEN)
  ) {
    // Up to 3 refresh attempts (semantics of refreshWithRetry defined elsewhere).
    const newCredentials = await refreshWithRetry(
      () => executor.refreshCredentials(credentials, log),
      3,
      log
    );

    if (newCredentials?.accessToken || newCredentials?.apiKey) {
      log?.info?.("TOKEN", `${provider.toUpperCase()} | refreshed for image generation`);
      // Mutate the caller's credentials object so later calls use fresh tokens.
      Object.assign(credentials, newCredentials);
      if (onCredentialsRefreshed && newCredentials) {
        await onCredentialsRefreshed(newCredentials);
      }

      try {
        // Rebuild headers with the refreshed key; gemini also embeds the key
        // in the URL, so its URL must be rebuilt too.
        const retryHeaders = buildImageHeaders(provider, credentials);
        const retryUrl = provider === "gemini" ? buildImageUrl(provider, model, credentials) : url;

        providerResponse = await fetch(retryUrl, {
          method: "POST",
          headers: retryHeaders,
          body: JSON.stringify(requestBody),
        });
      } catch (retryError) {
        // Best-effort retry: on failure providerResponse still holds the
        // original 401/403 and falls through to the error path below.
        log?.warn?.("TOKEN", `${provider.toUpperCase()} | retry after refresh failed`);
      }
    } else {
      log?.warn?.("TOKEN", `${provider.toUpperCase()} | refresh failed`);
    }
  }

  if (!providerResponse.ok) {
    const { statusCode, message } = await parseUpstreamError(providerResponse);
    const errMsg = formatProviderError(new Error(message), provider, model, statusCode);
    log?.debug?.("IMAGE", `Provider error: ${errMsg}`);
    return createErrorResult(statusCode, errMsg);
  }

  let responseBody;
  try {
    // HuggingFace returns binary image data -> base64-encode it into the
    // OpenAI shape here; every other provider returns JSON.
    if (provider === "huggingface") {
      const buffer = await providerResponse.arrayBuffer();
      const base64 = Buffer.from(buffer).toString("base64");
      responseBody = {
        created: Math.floor(Date.now() / 1000),
        data: [{ b64_json: base64 }],
      };
    } else {
      responseBody = await providerResponse.json();
    }
  } catch (parseError) {
    return createErrorResult(HTTP_STATUS.BAD_GATEWAY, `Invalid response from ${provider}`);
  }

  // Success callback fires after a parseable 2xx response, before normalization.
  if (onRequestSuccess) {
    await onRequestSuccess();
  }

  const normalized = normalizeImageResponse(responseBody, provider, body.prompt);

  log?.debug?.("IMAGE", `Success | images=${normalized.data?.length || 0}`);

  return {
    success: true,
    response: new Response(JSON.stringify(normalized), {
      headers: {
        "Content-Type": "application/json",
        "Access-Control-Allow-Origin": "*",
      },
    }),
  };
}
|
||||
@@ -13,6 +13,7 @@ const ALIAS_TO_PROVIDER_ID = {
|
||||
kmc: "kimi-coding",
|
||||
cl: "cline",
|
||||
oc: "opencode",
|
||||
ocg: "opencode-go",
|
||||
// TTS providers
|
||||
el: "elevenlabs",
|
||||
// API Key providers
|
||||
|
||||
Reference in New Issue
Block a user