diff --git a/open-sse/config/providerModels.js b/open-sse/config/providerModels.js
index a0888ed7..5b91396e 100644
--- a/open-sse/config/providerModels.js
+++ b/open-sse/config/providerModels.js
@@ -408,6 +408,19 @@ export const PROVIDER_MODELS = {
     { id: "deepseek-chat", name: "DeepSeek V3.2 Chat" },
     { id: "deepseek-reasoner", name: "DeepSeek V3.2 Reasoner" },
   ],
+  commandcode: [
+    { id: "deepseek/deepseek-v4-pro", name: "DeepSeek V4 Pro" },
+    { id: "deepseek/deepseek-v4-flash", name: "DeepSeek V4 Flash" },
+    { id: "moonshotai/Kimi-K2.6", name: "Kimi K2.6" },
+    { id: "moonshotai/Kimi-K2.5", name: "Kimi K2.5" },
+    { id: "zai-org/GLM-5.1", name: "GLM 5.1" },
+    { id: "zai-org/GLM-5", name: "GLM 5" },
+    { id: "MiniMaxAI/MiniMax-M2.7", name: "MiniMax M2.7" },
+    { id: "MiniMaxAI/MiniMax-M2.5", name: "MiniMax M2.5" },
+    { id: "Qwen/Qwen3.6-Max-Preview", name: "Qwen 3.6 Max Preview" },
+    { id: "Qwen/Qwen3.6-Plus", name: "Qwen 3.6 Plus" },
+    { id: "stepfun/Step-3.5-Flash", name: "Step 3.5 Flash" },
+  ],
   groq: [
     { id: "llama-3.3-70b-versatile", name: "Llama 3.3 70B" },
     { id: "meta-llama/llama-4-maverick-17b-128e-instruct", name: "Llama 4 Maverick" },
diff --git a/open-sse/config/providers.js b/open-sse/config/providers.js
index 323291db..4c844c5f 100644
--- a/open-sse/config/providers.js
+++ b/open-sse/config/providers.js
@@ -251,6 +251,14 @@ export const PROVIDERS = {
     baseUrl: "https://api.deepseek.com/chat/completions",
     format: "openai"
   },
+  commandcode: {
+    baseUrl: "https://api.commandcode.ai/alpha/generate",
+    format: "commandcode",
+    headers: {
+      "x-command-code-version": "0.25.7",
+      "x-cli-environment": "cli"
+    }
+  },
   groq: {
     baseUrl: "https://api.groq.com/openai/v1/chat/completions",
     format: "openai"
diff --git a/open-sse/executors/commandcode.js b/open-sse/executors/commandcode.js
new file mode 100644
index 00000000..e0c10e88
--- /dev/null
+++ b/open-sse/executors/commandcode.js
@@ -0,0 +1,88 @@
+import { randomUUID } from "crypto";
+import { BaseExecutor } from "./base.js";
+import { PROVIDERS } from "../config/providers.js";
+import { convertCommandCodeToOpenAI } from "../translator/response/commandcode-to-openai.js";
+
+/**
+ * CommandCodeExecutor — talks to https://api.commandcode.ai/alpha/generate
+ *
+ * Auth: Bearer API key (stored as the connection's apiKey).
+ * Adds the per-request `x-session-id` header expected by the CommandCode upstream.
+ *
+ * Upstream returns AI SDK v5 NDJSON (one JSON event per line, no `data:` prefix).
+ * We translate each event to an OpenAI chat.completion.chunk and emit it as SSE so
+ * both the streaming and non-streaming (forced SSE → JSON) downstream handlers in
+ * 9router can consume it without further format translation.
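+ *
+ * Illustrative framing (event payload invented; the chunk shape follows the translator):
+ *   upstream NDJSON line: {"type":"text-delta","text":"Hi"}
+ *   emitted SSE frame:    data: {"id":"chatcmpl-…","object":"chat.completion.chunk",
+ *                         "choices":[{"index":0,"delta":{"role":"assistant","content":"Hi"},"finish_reason":null}],...}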
+ */
+export class CommandCodeExecutor extends BaseExecutor {
+  constructor() {
+    super("commandcode", PROVIDERS.commandcode);
+  }
+
+  buildHeaders(credentials, stream = true) {
+    const headers = {
+      "Content-Type": "application/json",
+      ...(this.config.headers || {}),
+      "x-session-id": randomUUID(),
+    };
+
+    const token = credentials?.apiKey || credentials?.accessToken;
+    if (token) headers["Authorization"] = `Bearer ${token}`;
+
+    if (stream) headers["Accept"] = "text/event-stream";
+    return headers;
+  }
+
+  async execute(opts) {
+    const result = await super.execute(opts);
+    if (!result?.response?.ok || !result.response.body) return result;
+    result.response = wrapNdjsonAsOpenAISse(result.response, opts.model);
+    return result;
+  }
+}
+
+function wrapNdjsonAsOpenAISse(originalResponse, model) {
+  const decoder = new TextDecoder();
+  const encoder = new TextEncoder();
+  let buffer = "";
+  const state = { model };
+
+  const emitChunks = (chunks, controller) => {
+    if (!chunks) return;
+    const list = Array.isArray(chunks) ? chunks : [chunks];
+    for (const c of list) {
+      if (c == null) continue;
+      controller.enqueue(encoder.encode(`data: ${JSON.stringify(c)}\n\n`));
+    }
+  };
+
+  const transform = new TransformStream({
+    transform(chunk, controller) {
+      buffer += decoder.decode(chunk, { stream: true });
+      const lines = buffer.split("\n");
+      buffer = lines.pop() || "";
+      for (const line of lines) {
+        const trimmed = line.trim();
+        if (!trimmed) continue;
+        // Translate AI SDK v5 NDJSON line to one or more OpenAI chunks
+        emitChunks(convertCommandCodeToOpenAI(trimmed, state), controller);
+      }
+    },
+    flush(controller) {
+      const trimmed = buffer.trim();
+      if (trimmed) {
+        emitChunks(convertCommandCodeToOpenAI(trimmed, state), controller);
+      }
+      controller.enqueue(encoder.encode("data: [DONE]\n\n"));
+    },
+  });
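+
+  // Illustrative (payloads invented): the line buffer above reassembles events
+  // that arrive split across network chunks, e.g.
+  //   chunk 1: '{"type":"text-delta","te'
+  //   chunk 2: 'xt":"Hi"}\n{"type":"finish"}\n'
+  // translates only once both lines are complete: a text delta, then the finish event.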
+
+  const newBody = originalResponse.body.pipeThrough(transform);
+  // Rebuild the headers: the upstream Content-Type/Length/Encoding describe the
+  // NDJSON body we just replaced, so they must not leak into the SSE response.
+  const headers = new Headers(originalResponse.headers);
+  headers.set("Content-Type", "text/event-stream");
+  headers.delete("Content-Length");
+  headers.delete("Content-Encoding");
+  return new Response(newBody, {
+    status: originalResponse.status,
+    statusText: originalResponse.statusText,
+    headers,
+  });
+}
+
+export default CommandCodeExecutor;
diff --git a/open-sse/executors/index.js b/open-sse/executors/index.js
index 9479f4a5..78372cbf 100644
--- a/open-sse/executors/index.js
+++ b/open-sse/executors/index.js
@@ -14,6 +14,7 @@ import { OpenCodeGoExecutor } from "./opencode-go.js";
 import { GrokWebExecutor } from "./grok-web.js";
 import { PerplexityWebExecutor } from "./perplexity-web.js";
 import { OllamaLocalExecutor } from "./ollama-local.js";
+import { CommandCodeExecutor } from "./commandcode.js";
 import { DefaultExecutor } from "./default.js";
 
 const executors = {
@@ -35,6 +36,7 @@ const executors = {
   "grok-web": new GrokWebExecutor(),
   "perplexity-web": new PerplexityWebExecutor(),
   "ollama-local": new OllamaLocalExecutor(),
+  commandcode: new CommandCodeExecutor(),
 };
 
 const defaultCache = new Map();
@@ -67,3 +69,4 @@ export { OpenCodeGoExecutor } from "./opencode-go.js";
 export { GrokWebExecutor } from "./grok-web.js";
 export { PerplexityWebExecutor } from "./perplexity-web.js";
 export { OllamaLocalExecutor } from "./ollama-local.js";
+export { CommandCodeExecutor } from "./commandcode.js";
diff --git a/open-sse/handlers/chatCore.js b/open-sse/handlers/chatCore.js
index 338e92eb..ca59cb99 100644
--- a/open-sse/handlers/chatCore.js
+++ b/open-sse/handlers/chatCore.js
@@ -56,7 +56,7 @@ export async function handleChatCore({ body, modelInfo, credentials, log, onCred
   }
 
   const clientRequestedStreaming = body.stream === true || sourceFormat === FORMATS.ANTIGRAVITY || sourceFormat === FORMATS.GEMINI || sourceFormat === FORMATS.GEMINI_CLI;
-  const providerRequiresStreaming = provider === "openai" || provider === "codex";
+  const providerRequiresStreaming = provider === "openai" || provider === "codex" || provider === "commandcode";
   let stream = providerRequiresStreaming ? true : (body.stream !== false);
 
   // Check client Accept header preference for non-streaming requests
diff --git a/open-sse/services/model.js b/open-sse/services/model.js
index c127fe7f..234ed563 100644
--- a/open-sse/services/model.js
+++ b/open-sse/services/model.js
@@ -27,6 +27,8 @@ const ALIAS_TO_PROVIDER_ID = {
   "minimax-cn": "minimax-cn",
   ds: "deepseek",
   deepseek: "deepseek",
+  cmc: "commandcode",
+  commandcode: "commandcode",
   groq: "groq",
   xai: "xai",
   mistral: "mistral",
diff --git a/open-sse/translator/formats.js b/open-sse/translator/formats.js
index f5594183..89367d00 100644
--- a/open-sse/translator/formats.js
+++ b/open-sse/translator/formats.js
@@ -11,7 +11,8 @@ export const FORMATS = {
   ANTIGRAVITY: "antigravity",
   KIRO: "kiro",
   CURSOR: "cursor",
-  OLLAMA: "ollama"
+  OLLAMA: "ollama",
+  COMMANDCODE: "commandcode"
 };
 
 /**
diff --git a/open-sse/translator/index.js b/open-sse/translator/index.js
index a94a00ca..d581bd7e 100644
--- a/open-sse/translator/index.js
+++ b/open-sse/translator/index.js
@@ -40,6 +40,7 @@ function ensureInitialized() {
   require("./request/openai-to-kiro.js");
   require("./request/openai-to-cursor.js");
   require("./request/openai-to-ollama.js");
+  require("./request/openai-to-commandcode.js");
 
   // Response translators
   require("./response/claude-to-openai.js");
@@ -50,6 +51,7 @@ function ensureInitialized() {
   require("./response/kiro-to-openai.js");
   require("./response/cursor-to-openai.js");
   require("./response/ollama-to-openai.js");
+  require("./response/commandcode-to-openai.js");
 }
 
 // Strip specific content types from messages (explicit opt-in via strip[] in PROVIDER_MODELS)
diff --git a/open-sse/translator/request/openai-to-commandcode.js b/open-sse/translator/request/openai-to-commandcode.js
new file mode 100644
index 00000000..6dd6e266
--- /dev/null
+++ b/open-sse/translator/request/openai-to-commandcode.js
@@ -0,0 +1,81 @@
+/**
+ * OpenAI → CommandCode request translator
+ *
+ * The CommandCode endpoint expects an envelope:
+ *   { threadId, memory, config, params: { model, messages, stream, max_tokens, temperature, tools? } }
+ * where `params.messages` carry Anthropic-style content blocks ([{ type: "text", text }, ...]).
+ *
+ * The model id received here is already the upstream id (e.g. "deepseek/deepseek-v4-pro"),
+ * thanks to the `provider/model` registration in providerModels.js.
+ */
+import { register } from "../index.js";
+import { FORMATS } from "../formats.js";
+import { randomUUID } from "crypto";
+
+function toContentBlocks(content) {
+  if (content == null) return [{ type: "text", text: "" }];
+  if (typeof content === "string") return [{ type: "text", text: content }];
+  if (Array.isArray(content)) {
+    const blocks = [];
+    for (const part of content) {
+      if (typeof part === "string") {
+        blocks.push({ type: "text", text: part });
+      } else if (part && typeof part === "object") {
+        if (part.type === "text" && typeof part.text === "string") {
+          blocks.push({ type: "text", text: part.text });
+        } else if (part.type === "image_url" || part.type === "image") {
+          // CommandCode currently rejects multimodal blocks via this gateway;
+          // collapse to a textual placeholder so the request still validates.
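+          // e.g. { type: "image_url", image_url: { url } } → { type: "text", text: "[image omitted]" }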
+          blocks.push({ type: "text", text: "[image omitted]" });
+        } else if (typeof part.text === "string") {
+          blocks.push({ type: "text", text: part.text });
+        }
+      }
+    }
+    return blocks.length ? blocks : [{ type: "text", text: "" }];
+  }
+  return [{ type: "text", text: String(content) }];
+}
+
+function convertMessages(messages = []) {
+  return messages.map((m) => {
+    // OpenAI "tool" result messages are folded into user turns.
+    const role = m.role === "tool" ? "user" : (m.role || "user");
+    return { role, content: toContentBlocks(m.content) };
+  });
+}
+
+export function openaiToCommandCode(model, body, stream /* , credentials */) {
+  const params = {
+    model,
+    messages: convertMessages(body.messages),
+    stream: stream !== false,
+    max_tokens: body.max_tokens ?? body.max_output_tokens ?? 64000,
+    temperature: body.temperature ?? 0.3,
+  };
+
+  if (Array.isArray(body.tools) && body.tools.length > 0) {
+    params.tools = body.tools;
+  }
+  if (body.top_p != null) params.top_p = body.top_p;
+
+  const today = new Date().toISOString().slice(0, 10);
+
+  // The envelope carries a CLI-oriented workspace config; the gateway has no real
+  // workspace, so it is filled with inert values.
+  return {
+    threadId: randomUUID(),
+    memory: "",
+    config: {
+      workingDir: process.cwd(),
+      date: today,
+      environment: process.platform,
+      structure: [],
+      isGitRepo: false,
+      currentBranch: "",
+      mainBranch: "",
+      gitStatus: "",
+      recentCommits: [],
+    },
+    params,
+  };
+}
+
+register(FORMATS.OPENAI, FORMATS.COMMANDCODE, openaiToCommandCode, null);
diff --git a/open-sse/translator/response/commandcode-to-openai.js b/open-sse/translator/response/commandcode-to-openai.js
new file mode 100644
index 00000000..1290b9ba
--- /dev/null
+++ b/open-sse/translator/response/commandcode-to-openai.js
@@ -0,0 +1,194 @@
+/**
+ * CommandCode → OpenAI response translator
+ *
+ * CommandCode upstream emits NDJSON-style AI SDK v5 stream events:
+ *   {"type":"start"} {"type":"start-step", ...}
+ *   {"type":"reasoning-start","id":"..."} {"type":"reasoning-delta","text":"..."}
+ *   {"type":"text-start","id":"..."} {"type":"text-delta","text":"..."}
+ *   {"type":"tool-input-start","toolCallId":"...","toolName":"..."}
+ *   {"type":"tool-input-delta","toolCallId":"...","inputTextDelta":"..."}
+ *   {"type":"tool-call","toolCallId":"...","toolName":"...","input":{...}}
+ *   {"type":"finish-step","finishReason":"...","usage":{...}, ...}
+ *   {"type":"finish", ...}
+ *
+ * Each upstream event is one JSON object per line. The NDJSON wrapper in
+ * executors/commandcode.js splits the stream and hands each line to this
+ * translator as a string.
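+ *
+ * Illustrative mapping (payloads invented for the example):
+ *   {"type":"text-delta","text":"Hel"}     → delta.content = "Hel" (first chunk also sets role)
+ *   {"type":"reasoning-delta","text":"hm"} → delta.reasoning_content = "hm"
+ *   {"type":"tool-input-start", ...}       → delta.tool_calls = [{ index, id, function: { name, arguments: "" } }]
+ *   {"type":"finish", ...}                 → empty delta with finish_reason, plus usage when present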
+ */
+import { register } from "../index.js";
+import { FORMATS } from "../formats.js";
+
+function ensureState(state, model) {
+  if (!state.responseId) {
+    state.responseId = `chatcmpl-${Date.now()}`;
+    state.created = Math.floor(Date.now() / 1000);
+    state.model = state.model || model || "commandcode";
+    state.chunkIndex = 0;
+    state.toolIndex = 0;
+    state.toolIndexById = new Map();
+    state.openTools = new Set();
+    state.openText = false;
+    state.finishReason = null;
+    state.usage = null;
+  }
+}
+
+function makeChunk(state, delta, finishReason = null) {
+  return {
+    id: state.responseId,
+    object: "chat.completion.chunk",
+    created: state.created,
+    model: state.model,
+    choices: [{ index: 0, delta, finish_reason: finishReason }],
+  };
+}
+
+function mapFinishReason(reason) {
+  switch (reason) {
+    case "stop": return "stop";
+    case "length": return "length";
+    case "tool-calls":
+    case "tool_use": return "tool_calls";
+    case "content-filter": return "content_filter";
+    case "error": return "stop";
+    default: return reason || "stop";
+  }
+}
+
+export function convertCommandCodeToOpenAI(chunk, state) {
+  if (!chunk) return null;
+
+  // Already-OpenAI chunk: pass through
+  if (chunk && typeof chunk === "object" && chunk.object === "chat.completion.chunk") {
+    return chunk;
+  }
+
+  // Parse string lines coming out of upstream
+  let event = chunk;
+  if (typeof chunk === "string") {
+    const line = chunk.trim();
+    if (!line) return null;
+    // Tolerate raw "data: {...}" framing if the upstream wrapper inserts it
+    const json = line.startsWith("data:") ? line.slice(5).trim() : line;
+    if (!json || json === "[DONE]") return null;
+    try {
+      event = JSON.parse(json);
+    } catch {
+      return null;
+    }
+  }
+
+  if (!event || typeof event !== "object" || !event.type) return null;
+
+  ensureState(state, event.model);
+  const out = [];
+
+  switch (event.type) {
+    case "text-delta": {
+      const text = event.text || event.delta || "";
+      if (!text) break;
+      const delta = state.chunkIndex === 0 ? { role: "assistant", content: text } : { content: text };
+      state.chunkIndex++;
+      state.openText = true;
+      out.push(makeChunk(state, delta));
+      break;
+    }
+    case "reasoning-delta": {
+      const text = event.text || "";
+      if (!text) break;
+      // Map reasoning to OpenAI "reasoning_content" field (used by deepseek-reasoner-style clients).
+      const delta = state.chunkIndex === 0
+        ? { role: "assistant", reasoning_content: text }
+        : { reasoning_content: text };
+      state.chunkIndex++;
+      out.push(makeChunk(state, delta));
+      break;
+    }
+    case "tool-input-start": {
+      const id = event.toolCallId || `call_${Date.now()}_${state.toolIndex}`;
+      let idx = state.toolIndexById.get(id);
+      if (idx == null) {
+        idx = state.toolIndex++;
+        state.toolIndexById.set(id, idx);
+      }
+      state.openTools.add(id);
+      const delta = {
+        ...(state.chunkIndex === 0 ? { role: "assistant" } : {}),
+        tool_calls: [{
+          index: idx,
+          id,
+          type: "function",
+          function: { name: event.toolName || "", arguments: "" },
+        }],
+      };
+      state.chunkIndex++;
+      out.push(makeChunk(state, delta));
+      break;
+    }
+    case "tool-input-delta": {
+      const id = event.toolCallId;
+      const idx = state.toolIndexById.get(id);
+      if (idx == null) break;
+      const delta = {
+        tool_calls: [{
+          index: idx,
+          function: { arguments: event.inputTextDelta || event.delta || "" },
+        }],
+      };
+      out.push(makeChunk(state, delta));
+      break;
+    }
+    case "tool-call": {
+      // Final consolidated tool call — only emit if we never saw tool-input-* deltas.
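+      // (when deltas were streamed, the arguments already went out chunk by chunk,
+      // so re-emitting the consolidated call would duplicate them downstream)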
+      const id = event.toolCallId;
+      if (state.toolIndexById.has(id)) break;
+      const idx = state.toolIndex++;
+      state.toolIndexById.set(id, idx);
+      const argsStr = typeof event.input === "string" ? event.input : JSON.stringify(event.input ?? {});
+      const delta = {
+        ...(state.chunkIndex === 0 ? { role: "assistant" } : {}),
+        tool_calls: [{
+          index: idx,
+          id,
+          type: "function",
+          function: { name: event.toolName || "", arguments: argsStr },
+        }],
+      };
+      state.chunkIndex++;
+      out.push(makeChunk(state, delta));
+      break;
+    }
+    case "finish-step": {
+      state.finishReason = mapFinishReason(event.finishReason);
+      if (event.usage) state.usage = event.usage;
+      break;
+    }
+    case "finish": {
+      const finishReason = state.finishReason || mapFinishReason(event.finishReason || "stop");
+      const finalChunk = makeChunk(state, {}, finishReason);
+      const totalUsage = event.totalUsage || state.usage;
+      if (totalUsage) {
+        finalChunk.usage = {
+          prompt_tokens: totalUsage.inputTokens ?? 0,
+          completion_tokens: totalUsage.outputTokens ?? 0,
+          total_tokens: totalUsage.totalTokens ?? ((totalUsage.inputTokens ?? 0) + (totalUsage.outputTokens ?? 0)),
+        };
+      }
+      out.push(finalChunk);
+      break;
+    }
+    case "error": {
+      state.finishReason = "stop";
+      out.push(makeChunk(state, { content: `\n\n[CommandCode error: ${event.error || event.message || "unknown"}]` }));
+      out.push(makeChunk(state, {}, "stop"));
+      break;
+    }
+    // Silently ignore: start, start-step, reasoning-start, reasoning-end, text-start, text-end,
+    // provider-metadata, message-metadata, etc. They carry no client-visible content.
+    default:
+      break;
+  }
+
+  return out.length ? out : null;
+}
+
+register(FORMATS.COMMANDCODE, FORMATS.OPENAI, null, convertCommandCodeToOpenAI);
diff --git a/public/providers/commandcode.png b/public/providers/commandcode.png
new file mode 100644
index 00000000..ed7c8c99
Binary files /dev/null and b/public/providers/commandcode.png differ
diff --git a/src/shared/constants/providers.js b/src/shared/constants/providers.js
index 4423ad94..e7285d4b 100644
--- a/src/shared/constants/providers.js
+++ b/src/shared/constants/providers.js
@@ -69,6 +69,7 @@ export const APIKEY_PROVIDERS = {
   azure: { id: "azure", alias: "azure", name: "Azure OpenAI", icon: "cloud", color: "#0078D4", textIcon: "AZ", website: "https://azure.microsoft.com/en-us/products/ai-services/openai-service", notice: { apiKeyUrl: "https://portal.azure.com/#view/Microsoft_Azure_ProjectOxford/CognitiveServicesHub/~/OpenAI" }, hasProviderSpecificData: true },
   deepseek: { id: "deepseek", alias: "ds", name: "DeepSeek", icon: "bolt", color: "#4D6BFE", textIcon: "DS", website: "https://deepseek.com", notice: { apiKeyUrl: "https://platform.deepseek.com/api_keys" } },
+  commandcode: { id: "commandcode", alias: "cmc", name: "Command Code", icon: "smart_toy", color: "#000000", textIcon: "CC", website: "https://commandcode.ai", notice: { text: "Use your CommandCode CLI API key (starts with user_...); find it in ~/.commandcode/auth.json or at commandcode.ai/studio.", apiKeyUrl: "https://commandcode.ai/studio" } },
   groq: { id: "groq", alias: "groq", name: "Groq", icon: "speed", color: "#F55036", textIcon: "GQ", website: "https://groq.com", notice: { apiKeyUrl: "https://console.groq.com/keys" }, serviceKinds: ["llm", "imageToText", "stt"], sttConfig: { baseUrl: "https://api.groq.com/openai/v1/audio/transcriptions", authType: "apikey", authHeader: "bearer", format: "openai", models: [{ id: "whisper-large-v3", name: "Whisper Large v3" }, { id: "whisper-large-v3-turbo", name: "Whisper Large v3 Turbo" }, { id: "distil-whisper-large-v3-en", name: "Distil Whisper Large v3 EN" }] } },
   xai: { id: "xai", alias: "xai", name: "xAI (Grok)", icon: "auto_awesome", color: "#1DA1F2", textIcon: "XA", website: "https://x.ai", notice: { apiKeyUrl: "https://console.x.ai" }, serviceKinds: ["llm", "imageToText", "webSearch"], searchViaChat: { defaultModel: "grok-4.20-reasoning", pricingUrl: "https://x.ai/api#pricing" } },
   mistral: { id: "mistral", alias: "mistral", name: "Mistral", icon: "air", color: "#FF7000", textIcon: "MI", website: "https://mistral.ai", notice: { apiKeyUrl: "https://console.mistral.ai/api-keys" }, serviceKinds: ["llm", "imageToText", "embedding"], embeddingConfig: { baseUrl: "https://api.mistral.ai/v1/embeddings", authType: "apikey", authHeader: "bearer", models: [{ id: "mistral-embed", name: "Mistral Embed", dimensions: 1024 }] } },
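
A minimal sketch for exercising the response translator in isolation (illustrative only: the event payloads are invented, the script name and import path are hypothetical, and the translator module is assumed to load cleanly outside the router):

// check-translator.mjs (hypothetical helper, not part of this change)
import { convertCommandCodeToOpenAI } from "./open-sse/translator/response/commandcode-to-openai.js";

const state = { model: "deepseek/deepseek-v4-pro" };
const lines = [
  '{"type":"start"}',
  '{"type":"text-delta","text":"Hello"}',
  '{"type":"tool-call","toolCallId":"t1","toolName":"read_file","input":{"path":"a.txt"}}',
  '{"type":"finish","totalUsage":{"inputTokens":10,"outputTokens":5,"totalTokens":15}}',
];

// Expect three chunks: role + content, a tool_calls delta, then an empty delta
// carrying finish_reason and usage; "start" is ignored by design.
for (const line of lines) {
  for (const chunk of convertCommandCodeToOpenAI(line, state) ?? []) {
    console.log(JSON.stringify(chunk));
  }
}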