diff --git a/open-sse/config/providerModels.js b/open-sse/config/providerModels.js index 9fe764cb..f8ce257d 100644 --- a/open-sse/config/providerModels.js +++ b/open-sse/config/providerModels.js @@ -344,6 +344,16 @@ export const PROVIDER_MODELS = { { id: "GLM-4.7", name: "GLM-4.7" }, { id: "DeepSeek-V3.2", name: "DeepSeek-V3.2" }, ], + byteplus: [ + { id: "doubao-seed-2.0-pro", name: "Doubao Seed 2.0 Pro" }, + { id: "doubao-seed-2.0-code", name: "Doubao Seed 2.0 Code" }, + { id: "doubao-seed-2.0-lite", name: "Doubao Seed 2.0 Lite" }, + { id: "doubao-seed-code", name: "Doubao Seed Code" }, + { id: "glm-5.1", name: "GLM-5.1" }, + { id: "glm-4.7", name: "GLM-4.7" }, + { id: "kimi-k2.5", name: "Kimi-K2.5" }, + { id: "gpt-oss-120b", name: "GPT-OSS-120B" }, + ], deepseek: [ { id: "deepseek-v4-flash", name: "DeepSeek V4 Flash" }, { id: "deepseek-chat", name: "DeepSeek V3.2 Chat" }, diff --git a/open-sse/config/providers.js b/open-sse/config/providers.js index 9e3ba5f7..3422a21e 100644 --- a/open-sse/config/providers.js +++ b/open-sse/config/providers.js @@ -161,6 +161,11 @@ export const PROVIDERS = { format: "openai", headers: {} }, + byteplus: { + baseUrl: "https://ark.ap-southeast.bytepluses.com/api/coding/v3/chat/completions", + format: "openai", + headers: {} + }, github: { baseUrl: "https://api.githubcopilot.com/chat/completions", responsesUrl: "https://api.githubcopilot.com/responses", diff --git a/open-sse/services/model.js b/open-sse/services/model.js index 674d94a8..f4f0c54e 100644 --- a/open-sse/services/model.js +++ b/open-sse/services/model.js @@ -51,6 +51,8 @@ const ALIAS_TO_PROVIDER_ID = { chutes: "chutes", ark: "volcengine-ark", "volcengine-ark": "volcengine-ark", + byteplus: "byteplus", + bpm: "byteplus", cursor: "cursor", vx: "vertex", vertex: "vertex", diff --git a/public/providers/byteplus.png b/public/providers/byteplus.png new file mode 100644 index 00000000..a5dc899d Binary files /dev/null and b/public/providers/byteplus.png differ diff --git 
a/src/app/api/providers/[id]/models/route.js b/src/app/api/providers/[id]/models/route.js index 78ac8c5a..989845aa 100644 --- a/src/app/api/providers/[id]/models/route.js +++ b/src/app/api/providers/[id]/models/route.js @@ -150,6 +150,7 @@ const PROVIDER_MODELS_CONFIG = { parseResponse: (data) => data.data || [] }, "volcengine-ark": createOpenAIModelsConfig("https://ark.cn-beijing.volces.com/api/coding/v3/models"), + byteplus: createOpenAIModelsConfig("https://ark.ap-southeast.bytepluses.com/api/coding/v3/models"), // OpenAI-compatible API key providers deepseek: createOpenAIModelsConfig("https://api.deepseek.com/models"), diff --git a/src/app/api/providers/[id]/test/testUtils.js b/src/app/api/providers/[id]/test/testUtils.js index b27b50d7..7b647713 100644 --- a/src/app/api/providers/[id]/test/testUtils.js +++ b/src/app/api/providers/[id]/test/testUtils.js @@ -2,6 +2,7 @@ import { getProviderConnectionById, updateProviderConnection } from "@/lib/local import { resolveConnectionProxyConfig } from "@/lib/network/connectionProxy"; import { testProxyUrl } from "@/lib/network/proxyTest"; import { isOpenAICompatibleProvider, isAnthropicCompatibleProvider } from "@/shared/constants/providers"; +import { PROVIDER_ENDPOINTS } from "@/shared/constants/config"; import { getDefaultModel } from "open-sse/config/providerModels.js"; import { resolveOllamaLocalHost } from "open-sse/config/providers.js"; import { @@ -454,8 +455,9 @@ async function testApiKeyConnection(connection, effectiveProxy = null) { const valid = res.status !== 401 && res.status !== 403; return { valid, error: valid ? 
null : "Invalid API key" }; } - case "volcengine-ark": { - const res = await fetchWithConnectionProxy("https://ark.cn-beijing.volces.com/api/coding/v3/chat/completions", { + case "volcengine-ark": + case "byteplus": { + const res = await fetchWithConnectionProxy(PROVIDER_ENDPOINTS[connection.provider], { method: "POST", headers: { "Authorization": `Bearer ${connection.apiKey}`, "content-type": "application/json" }, body: JSON.stringify({ model: getDefaultModel(connection.provider), max_tokens: 1, messages: [{ role: "user", content: "test" }] }), diff --git a/src/app/api/providers/validate/route.js b/src/app/api/providers/validate/route.js index 688e32e8..0a73eda9 100644 --- a/src/app/api/providers/validate/route.js +++ b/src/app/api/providers/validate/route.js @@ -3,6 +3,7 @@ import { getProviderNodeById } from "@/models"; import { isOpenAICompatibleProvider, isAnthropicCompatibleProvider, isCustomEmbeddingProvider } from "@/shared/constants/providers"; import { getDefaultModel } from "open-sse/config/providerModels.js"; import { resolveOllamaLocalHost } from "open-sse/config/providers.js"; +import { PROVIDER_ENDPOINTS } from "@/shared/constants/config"; // POST /api/providers/validate - Validate API key with provider export async function POST(request) { @@ -211,16 +212,16 @@ export async function POST(request) { } break; } - case "volcengine-ark": { - const testModel = getDefaultModel(provider); - const res = await fetch("https://ark.cn-beijing.volces.com/api/coding/v3/chat/completions", { + case "volcengine-ark": + case "byteplus": { + const res = await fetch(PROVIDER_ENDPOINTS[provider], { method: "POST", headers: { "Authorization": `Bearer ${apiKey}`, "content-type": "application/json", }, body: JSON.stringify({ - model: testModel, + model: getDefaultModel(provider), max_tokens: 1, messages: [{ role: "user", content: "test" }], }), diff --git a/src/shared/constants/config.js b/src/shared/constants/config.js index 5b52cb7e..ed260498 100644 --- 
a/src/shared/constants/config.js +++ b/src/shared/constants/config.js @@ -67,6 +67,7 @@ export const PROVIDER_ENDPOINTS = { alicode: "https://coding.dashscope.aliyuncs.com/v1/chat/completions", "alicode-intl": "https://coding-intl.dashscope.aliyuncs.com/v1/chat/completions", "volcengine-ark": "https://ark.cn-beijing.volces.com/api/coding/v3/chat/completions", + byteplus: "https://ark.ap-southeast.bytepluses.com/api/coding/v3/chat/completions", openai: "https://api.openai.com/v1/chat/completions", anthropic: "https://api.anthropic.com/v1/messages", gemini: "https://generativelanguage.googleapis.com/v1beta/models", diff --git a/src/shared/constants/providers.js b/src/shared/constants/providers.js index 84ec8990..a9c06d6c 100644 --- a/src/shared/constants/providers.js +++ b/src/shared/constants/providers.js @@ -60,6 +60,7 @@ export const APIKEY_PROVIDERS = { alicode: { id: "alicode", alias: "alicode", name: "Alibaba", icon: "cloud", color: "#FF6A00", textIcon: "ALi" }, "alicode-intl": { id: "alicode-intl", alias: "alicode-intl", name: "Alibaba Intl", icon: "cloud", color: "#FF6A00", textIcon: "ALi" }, "volcengine-ark": { id: "volcengine-ark", alias: "ark", name: "Volcengine Ark", icon: "cloud", color: "#1677FF", textIcon: "ARK", website: "https://ark.cn-beijing.volces.com" }, + byteplus: { id: "byteplus", alias: "bpm", name: "BytePlus ModelArk", icon: "cloud", color: "#2563EB", textIcon: "BP", website: "https://console.byteplus.com/ark", serviceKinds: ["llm"] }, openai: { id: "openai", alias: "openai", name: "OpenAI", icon: "auto_awesome", color: "#10A37F", textIcon: "OA", website: "https://platform.openai.com", serviceKinds: ["llm", "embedding", "tts", "image", "imageToText", "webSearch"], thinkingConfig: THINKING_CONFIG.effort }, anthropic: { id: "anthropic", alias: "anthropic", name: "Anthropic", icon: "smart_toy", color: "#D97757", textIcon: "AN", website: "https://console.anthropic.com", serviceKinds: ["llm", "imageToText"] }, "opencode-go": { id: "opencode-go", 
alias: "ocg", name: "OpenCode Go", icon: "terminal", color: "#E87040", textIcon: "OC", website: "https://opencode.ai/auth", notice: { text: "OpenCode Go subscription: $5/mo (then $10/mo). Access to Kimi, GLM, Qwen, MiMo, MiniMax models.", apiKeyUrl: "https://opencode.ai/auth" } },