Update jsconfig.json and package.json to correct the open-sse path references, pointing them at the local ./open-sse directory instead of the parent-relative ../open-sse.

This commit is contained in:
decolua
2026-01-05 10:37:09 +07:00
parent 3857598de4
commit e35421beb1
39 changed files with 6846 additions and 6 deletions

View File

@@ -3,8 +3,8 @@
"baseUrl": ".",
"paths": {
"@/*": ["./src/*"],
"open-sse": ["../open-sse"],
"open-sse/*": ["../open-sse/*"]
"open-sse": ["./open-sse"],
"open-sse/*": ["./open-sse/*"]
},
"module": "ESNext",
"moduleResolution": "bundler"

8
open-sse/.npmignore Normal file
View File

@@ -0,0 +1,8 @@
node_modules/
*.log
.DS_Store
test/
*.test.js
.env
.env.*

View File

@@ -0,0 +1,209 @@
// Provider configurations
export const PROVIDERS = {
claude: {
baseUrl: "https://api.anthropic.com/v1/messages",
format: "claude",
headers: {
"Anthropic-Version": "2023-06-01",
"Anthropic-Beta": "claude-code-20250219,oauth-2025-04-20,interleaved-thinking-2025-05-14,fine-grained-tool-streaming-2025-05-14,context-management-2025-06-27",
"Anthropic-Dangerous-Direct-Browser-Access": "true",
"User-Agent": "claude-cli/1.0.83 (external, cli)",
"X-App": "cli",
"X-Stainless-Helper-Method": "stream",
"X-Stainless-Retry-Count": "0",
"X-Stainless-Runtime-Version": "v24.3.0",
"X-Stainless-Package-Version": "0.55.1",
"X-Stainless-Runtime": "node",
"X-Stainless-Lang": "js",
"X-Stainless-Arch": "arm64",
"X-Stainless-Os": "MacOS",
"X-Stainless-Timeout": "60"
},
// Claude OAuth configuration
clientId: "9d1c250a-e61b-44d9-88ed-5944d1962f5e",
tokenUrl: "https://console.anthropic.com/v1/oauth/token"
},
gemini: {
baseUrl: "https://generativelanguage.googleapis.com/v1beta/models",
format: "gemini",
clientId: "681255809395-oo8ft2oprdrnp9e3aqf6av3hmdib135j.apps.googleusercontent.com",
clientSecret: "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"
},
"gemini-cli": {
baseUrl: "https://cloudcode-pa.googleapis.com/v1internal",
format: "gemini-cli",
clientId: "681255809395-oo8ft2oprdrnp9e3aqf6av3hmdib135j.apps.googleusercontent.com",
clientSecret: "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"
},
codex: {
baseUrl: "https://chatgpt.com/backend-api/codex/responses",
format: "codex",
headers: {
"Version": "0.21.0",
"Openai-Beta": "responses=experimental",
"User-Agent": "codex_cli_rs/0.50.0 (Mac OS 26.0.1; arm64)"
},
// OpenAI OAuth configuration
clientId: "app_EMoamEEZ73f0CkXaXp7hrann",
clientSecret: "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",
tokenUrl: "https://auth.openai.com/oauth/token"
},
qwen: {
baseUrl: "https://portal.qwen.ai/v1/chat/completions",
format: "openai",
headers: {
"User-Agent": "google-api-nodejs-client/9.15.1",
"X-Goog-Api-Client": "gl-node/22.17.0"
},
// Qwen OAuth configuration
clientId: "f0304373b74a44d2b584a3fb70ca9e56", // From CLIProxyAPI
tokenUrl: "https://chat.qwen.ai/api/v1/oauth2/token",
authUrl: "https://chat.qwen.ai/api/v1/oauth2/device/code"
},
iflow: {
baseUrl: "https://apis.iflow.cn/v1/chat/completions",
format: "openai",
headers: {
"User-Agent": "iFlow-Cli"
},
// iFlow OAuth configuration (from CLIProxyAPI)
clientId: "10009311001",
clientSecret: "4Z3YjXycVsQvyGF1etiNlIBB4RsqSDtW",
tokenUrl: "https://iflow.cn/oauth/token",
authUrl: "https://iflow.cn/oauth"
},
antigravity: {
baseUrls: [
"https://daily-cloudcode-pa.sandbox.googleapis.com",
"https://cloudcode-pa.googleapis.com"
],
format: "antigravity",
headers: {
"User-Agent": "antigravity/1.11.5 windows/amd64"
},
clientId: "1071006060591-tmhssin2h21lcre235vtolojh4g403ep.apps.googleusercontent.com",
clientSecret: "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"
},
openrouter: {
baseUrl: "https://openrouter.ai/api/v1/chat/completions",
format: "openai",
headers: {
"HTTP-Referer": "https://endpoint-proxy.local",
"X-Title": "Endpoint Proxy"
}
},
openai: {
baseUrl: "https://api.openai.com/v1/chat/completions",
format: "openai"
},
glm: {
baseUrl: "https://api.z.ai/api/anthropic/v1/messages",
format: "claude",
headers: {
"Anthropic-Version": "2023-06-01",
"Anthropic-Beta": "claude-code-20250219,interleaved-thinking-2025-05-14"
}
},
kimi: {
baseUrl: "https://api.kimi.com/coding/v1/messages",
format: "claude",
headers: {
"Anthropic-Version": "2023-06-01",
"Anthropic-Beta": "claude-code-20250219,interleaved-thinking-2025-05-14"
}
},
minimax: {
baseUrl: "https://api.minimax.io/anthropic/v1/messages",
format: "claude",
headers: {
"Anthropic-Version": "2023-06-01",
"Anthropic-Beta": "claude-code-20250219,interleaved-thinking-2025-05-14"
}
},
github: {
baseUrl: "https://api.githubcopilot.com/chat/completions", // GitHub Copilot API endpoint for chat
format: "openai", // GitHub Copilot uses OpenAI-compatible format
headers: {
"copilot-integration-id": "vscode-chat",
"editor-version": "vscode/1.107.1",
"editor-plugin-version": "copilot-chat/0.26.7",
"user-agent": "GitHubCopilotChat/0.26.7",
"openai-intent": "conversation-panel",
"x-github-api-version": "2025-04-01",
"x-vscode-user-agent-library-version": "electron-fetch",
"X-Initiator": "user",
"Accept": "application/json",
"Content-Type": "application/json"
}
}
};
// Claude system prompt
export const CLAUDE_SYSTEM_PROMPT = "You are Claude Code, Anthropic's official CLI for Claude.";
// OAuth endpoints
export const OAUTH_ENDPOINTS = {
google: {
token: "https://oauth2.googleapis.com/token",
auth: "https://accounts.google.com/o/oauth2/auth"
},
openai: {
token: "https://auth.openai.com/oauth/token",
auth: "https://auth.openai.com/oauth/authorize"
},
anthropic: {
token: "https://console.anthropic.com/v1/oauth/token",
auth: "https://console.anthropic.com/v1/oauth/authorize"
},
qwen: {
token: "https://chat.qwen.ai/api/v1/oauth2/token", // From CLIProxyAPI
auth: "https://chat.qwen.ai/api/v1/oauth2/device/code" // From CLIProxyAPI
},
iflow: {
token: "https://iflow.cn/oauth/token",
auth: "https://iflow.cn/oauth"
},
github: {
token: "https://github.com/login/oauth/access_token",
auth: "https://github.com/login/oauth/authorize",
deviceCode: "https://github.com/login/device/code"
}
};
// Cache TTLs (seconds)
export const CACHE_TTL = {
userInfo: 300, // 5 minutes
modelAlias: 3600 // 1 hour
};
// Default max tokens
export const DEFAULT_MAX_TOKENS = 64000;
// Minimum max tokens for tool calling (to prevent truncated arguments)
export const DEFAULT_MIN_TOKENS = 32000;
// Exponential backoff config for rate limits (like CLIProxyAPI)
export const BACKOFF_CONFIG = {
base: 1000, // 1 second base
max: 30 * 60 * 1000, // 30 minutes max
maxLevel: 15 // Cap backoff level
};
// Error-based cooldown times (aligned with CLIProxyAPI)
export const COOLDOWN_MS = {
unauthorized: 30 * 60 * 1000, // 401 → 30 min
paymentRequired: 30 * 60 * 1000, // 402/403 → 30 min
notFound: 12 * 60 * 60 * 1000, // 404 → 12 hours
transient: 30 * 1000, // 408/500/502/503/504 → 30 sec
requestNotAllowed: 5 * 1000, // "Request not allowed" → 5 sec
// Legacy aliases for backward compatibility
rateLimit: 15 * 60 * 1000,
serviceUnavailable: 60 * 1000,
authExpired: 30 * 60 * 1000
};
// Skip patterns - requests containing these texts will bypass provider
export const SKIP_PATTERNS = [
"Please write a 5-10 word title for the following conversation:"
];

View File

@@ -0,0 +1,7 @@
// Default signature for thinking mode when no signature from thinkingStore
export const DEFAULT_THINKING_CLAUDE_SIGNATURE = "EpwGCkYIChgCKkCzVUuRrg7CcglSUWEef4rH6o35g9UYS8ZPe0/VomQTBsFx6sttYNj5l8GqgW6ejuHyYqpFToxIbZl0bw17l5dJEgzCnqDO0Z8fRlMrNgsaDLS1cnCjC53KBqE0CCIwAADQdo1eO+7qPAmo8J4WR3JPmr92S97kmvr5K1iPMiOpkZNj8mEXW8uzBoOJs/9ZKoMFiqHJ3UObwaJDqFOW70E9oCwDoc6jesaWVAEdN5vWfKMpIkjFJjECdjIdkxyJNJ8Ib8yXVal3qwE7uThoPRqSZDdHB5mmwPEjWE/90cSYCbtX2YsJki1265CabBb8/QEkODXg4kgRrL+c8e8rRXz/dr1RswvaPuzEdGKHRNi9UooNUeOK4/ebx1KkP9YZttyohN9GWqlts36kOoW0Cfie/ABDgF9g534BPth/sstxDM6d79QlRmh6NxizyTF74DXJI34u0M4tTRchqE5pAq85SgdJaa+dix1yJPMji8m6nZkwJbscJb9rdc2MKyKWjz8QL2+rTSSuZ2F1k1qSsW0xNcI7qLcI12Vncfn/VqY6YOIZy/saZBR0ezXvN6g+UYbuIdyVg7AyIFZt3nbrO7/kmOEb2VKzygwklHGEIJHfFgMpH3JSrAzbZIowVHOF7VaJ+KXRFDCFin7hHTOiOsdg+1ij1mML9Z/x/9CP4b7OUcaQm1llDZPSHc6rZMNL3DdB+fW5YfmNgKU35S+7AMtA10nVILzDAk1UV4T2K9Do09JlI6rjOs9UuULlIN2Z0eE8YTlANR6uQcw7lMcdfqYE8tke4rDKc2dDiaS5vVe45VewICNpdXGN11yw8QqH7p27CR1HtN30e0tHXOR3bIwWk/Yb6O5fTaKG6Ri8e5ZCPvdD9HqepVi188nM0iTjJqL58F3ni04ECIhcbyaQWnuTes1Kw4CMwiZDLQkk8Hgz7HkUOf1btQTF/0nhD7ry0n0hAEg2PaDM3V6TjOjf4hEldRmeqERcQF1PfgKb6ZM12rlIIfUqKACczWJSzTV158+47HX36o0cgux6nFlv/DE+sEiRVxgB";
export const DEFAULT_THINKING_GEMINI_SIGNATURE = "EuwGCukGAXLI2nxwZIq54WWSoL/YN0P3TsDZ7zRnLi8g0S4aVr2HUGxvaHKySuY6HAVzcE0GPGjXrytLIldxthSvfxgUlJh6Qa9Z+Oj5QZBlYdg6HaJ6yuY5R7waE6rdwBsRf7Ft2j3DJ9rMi9qhWFqApewYtPhls3VHtuvND3l8Rm09+lbAXQs6KKWEWrxNLKTBkfpMgXhRERc/TQRMZu1twAablm6/Zk1tsYRvfWKLsNbeKF+CCojJdXJKvnR/8Ouuoa+Y2Ti20hcW7aZIIjZDFYPU//k6Ybmhg69J/imbFai2ckhfLaisqdDkdoIiBJScTOUvYqP6AE9d4MsydSC+UlhIMk4hoP76R8vUSCZRMkjOaDXstf/QoVZKbt94wyRZgAJ1G0BqI8L5ow86kLpA4wJEtxsRGymOE4bKUvApveBakYDNM9APkf+LbtbzWSseGjoZcSlycF9iN8Q2XNYKRrHbv3Lr5Y8JjdH/5y/6SHkNehTEZugaeGnSPSyCTWto1kQgHpxdWmhkLfJGNUGLmue7Mesj4TSms4J33mRpYVhNB/J333FCqIP0hr/E7BkkjEn7yZ4X7SQlh+xKPurapsnHRwiKmtsilmEFrnTE9iQr+pMr6M29qqFNv1tr5yumbaJw8JW9sB15tNsRv+dW6BjNanbsKz7HCgKUBc8tGy+7YuhXzAfViyRefcjK7eZW0Fbyt7AbybJTKz78W8NH7ye6LAwzOebXpeZ4D43fNIt8bKh26qgduSQv/7o+pAflkuqHZ99YWgHQ8h8OkZFi3eOiSYjsjhdZ/czWOdoPI/OnqIldzMPF5YlrKBLFX8VhRKVmqgsmWf5PHGulHhMkVlS+XG2UIseGy69ARa93D78Gsa+1n1kJr7EEB7Rh+27vUMxVYLdz1yMSvE5nalTAlg/ZeG8+XQ0cHuAI3KbQpHW2Q++RdXfm5JzD5WdJZUU+Zn8t8UUn85BH4RxZLeE0qJikgSsKoYVBc6YhiMjhPgkR95ReimY4Z0xCJdRo1gjexOFeODZMpQF6Yxnoic7IrdgsFA3iePTbFnPp3IAM1fAThWhXJUn3QInUOTd5o1qmTmn6REbL15g/JQNl+dqUoPkhleeb2V3kjqp1okmO3wMZbPknR3S1LZNmlS72/iBQUm+n2b/RCn4PjmM2";
export const DEFAULT_THINKING_TEXT = "...";

View File

@@ -0,0 +1,19 @@
export const ollamaModels = {
models: [
{
name: "llama3.2",
modified_at: "2025-12-26T00:00:00Z",
size: 2000000000,
digest: "abc123def456",
details: { format: "gguf", family: "llama", parameter_size: "3B", quantization_level: "Q4_K_M" }
},
{
name: "qwen2.5",
modified_at: "2025-12-26T00:00:00Z",
size: 4000000000,
digest: "def456abc123",
details: { format: "gguf", family: "qwen", parameter_size: "7B", quantization_level: "Q4_K_M" }
}
]
};

View File

@@ -0,0 +1,161 @@
// Provider models - Single source of truth
// Key = alias (cc, cx, gc, qw, if, ag, gh for OAuth; id for API Key)
// Field "provider" for special cases (e.g. AntiGravity models that call different backends)
export const PROVIDER_MODELS = {
// OAuth Providers (using alias)
cc: [ // Claude Code
{ id: "claude-opus-4-5-20251101", name: "Claude 4.5 Opus" },
{ id: "claude-sonnet-4-5-20250929", name: "Claude 4.5 Sonnet" },
{ id: "claude-haiku-4-5-20251001", name: "Claude 4.5 Haiku" },
],
cx: [ // OpenAI Codex
{ id: "gpt-5.2-codex", name: "GPT 5.2 Codex" },
{ id: "gpt-5.2", name: "GPT 5.2" },
{ id: "gpt-5.1-codex-max", name: "GPT 5.1 Codex Max" },
{ id: "gpt-5.1-codex", name: "GPT 5.1 Codex" },
{ id: "gpt-5.1-codex-mini", name: "GPT 5.1 Codex Mini" },
{ id: "gpt-5.1", name: "GPT 5.1" },
{ id: "gpt-5-codex", name: "GPT 5 Codex" },
{ id: "gpt-5-codex-mini", name: "GPT 5 Codex Mini" },
],
gc: [ // Gemini CLI
{ id: "gemini-3-flash-preview", name: "Gemini 3 Flash Preview" },
{ id: "gemini-3-pro-preview", name: "Gemini 3 Pro Preview" },
{ id: "gemini-2.5-pro", name: "Gemini 2.5 Pro" },
{ id: "gemini-2.5-flash", name: "Gemini 2.5 Flash" },
{ id: "gemini-2.5-flash-lite", name: "Gemini 2.5 Flash Lite" },
],
qw: [ // Qwen Code
{ id: "qwen3-coder-plus", name: "Qwen3 Coder Plus" },
{ id: "qwen3-coder-flash", name: "Qwen3 Coder Flash" },
{ id: "vision-model", name: "Qwen3 Vision Model" },
],
if: [ // iFlow AI
{ id: "qwen3-coder-plus", name: "Qwen3 Coder Plus" },
{ id: "kimi-k2", name: "Kimi K2" },
{ id: "kimi-k2-thinking", name: "Kimi K2 Thinking" },
{ id: "deepseek-r1", name: "DeepSeek R1" },
{ id: "deepseek-v3.2-chat", name: "DeepSeek V3.2 Chat" },
{ id: "deepseek-v3.2-reasoner", name: "DeepSeek V3.2 Reasoner" },
{ id: "minimax-m2", name: "MiniMax M2" },
{ id: "glm-4.6", name: "GLM 4.6" },
{ id: "glm-4.7", name: "GLM 4.7" },
],
ag: [ // Antigravity - special case: models call different backends
{ id: "gemini-3-pro-low", name: "Gemini 3 Pro Low" },
{ id: "gemini-3-pro-high", name: "Gemini 3 Pro High" },
{ id: "gemini-3-flash", name: "Gemini 3 Flash" },
{ id: "gemini-2.5-flash", name: "Gemini 2.5 Flash" },
{ id: "claude-sonnet-4-5", name: "Claude Sonnet 4.5 " },
{ id: "claude-sonnet-4-5-thinking", name: "Claude Sonnet 4.5 Thinking" },
{ id: "claude-opus-4-5-thinking", name: "Claude Opus 4.5" },
],
gh: [ // GitHub Copilot
{ id: "gpt-5", name: "GPT-5" },
{ id: "gpt-5-mini", name: "GPT-5 Mini" },
// { id: "gpt-5.1", name: "GPT-5.1" },
// { id: "gpt-5.2", name: "GPT-5.2" },
// { id: "gpt-5-codex", name: "GPT-5 Codex" },
{ id: "gpt-5.1-codex", name: "GPT-5.1 Codex" },
// { id: "gpt-5.1-codex-mini", name: "GPT-5.1 Codex Mini" },
{ id: "gpt-5.1-codex-max", name: "GPT-5.1 Codex Max" },
{ id: "gpt-4.1", name: "GPT-4.1" },
{ id: "claude-4.5-sonnet", name: "Claude 4.5 Sonnet" },
{ id: "claude-4.5-opus", name: "Claude 4.5 Opus" },
{ id: "claude-4.5-haiku", name: "Claude 4.5 Haiku" },
{ id: "gemini-3-pro", name: "Gemini 3 Pro" },
{ id: "gemini-3-flash", name: "Gemini 3 Flash" },
{ id: "gemini-2.5-pro", name: "Gemini 2.5 Pro" },
{ id: "grok-code-fast-1", name: "Grok Code Fast 1" },
],
// API Key Providers (alias = id)
openai: [
{ id: "gpt-4o", name: "GPT-4o" },
{ id: "gpt-4o-mini", name: "GPT-4o Mini" },
{ id: "gpt-4-turbo", name: "GPT-4 Turbo" },
{ id: "o1", name: "O1" },
{ id: "o1-mini", name: "O1 Mini" },
],
anthropic: [
{ id: "claude-sonnet-4-20250514", name: "Claude Sonnet 4" },
{ id: "claude-opus-4-20250514", name: "Claude Opus 4" },
{ id: "claude-3-5-sonnet-20241022", name: "Claude 3.5 Sonnet" },
],
gemini: [
{ id: "gemini-3-pro-preview", name: "Gemini 3 Pro Preview" },
{ id: "gemini-2.5-pro", name: "Gemini 2.5 Pro" },
{ id: "gemini-2.5-flash", name: "Gemini 2.5 Flash" },
{ id: "gemini-2.5-flash-lite", name: "Gemini 2.5 Flash Lite" },
],
openrouter: [
{ id: "auto", name: "Auto (Best Available)" },
],
glm: [
{ id: "glm-4.7", name: "GLM 4.7" },
{ id: "glm-4.6", name: "GLM 4.6" },
{ id: "glm-4.6v", name: "GLM 4.6V (Vision)" },
],
kimi: [
{ id: "kimi-latest", name: "Kimi Latest" },
],
minimax: [
{ id: "MiniMax-M2.1", name: "MiniMax M2.1" },
],
};
// Helper functions
export function getProviderModels(aliasOrId) {
return PROVIDER_MODELS[aliasOrId] || [];
}
export function getDefaultModel(aliasOrId) {
const models = PROVIDER_MODELS[aliasOrId];
return models?.[0]?.id || null;
}
export function isValidModel(aliasOrId, modelId, passthroughProviders = new Set()) {
if (passthroughProviders.has(aliasOrId)) return true;
const models = PROVIDER_MODELS[aliasOrId];
if (!models) return false;
return models.some(m => m.id === modelId);
}
export function findModelName(aliasOrId, modelId) {
const models = PROVIDER_MODELS[aliasOrId];
if (!models) return modelId;
const found = models.find(m => m.id === modelId);
return found?.name || modelId;
}
export function getModelTargetFormat(aliasOrId, modelId) {
const models = PROVIDER_MODELS[aliasOrId];
if (!models) return null;
const found = models.find(m => m.id === modelId);
return found?.targetFormat || null;
}
// Provider ID to alias mapping
export const PROVIDER_ID_TO_ALIAS = {
claude: "cc",
codex: "cx",
"gemini-cli": "gc",
qwen: "qw",
iflow: "if",
antigravity: "ag",
github: "gh",
openai: "openai",
anthropic: "anthropic",
gemini: "gemini",
openrouter: "openrouter",
glm: "glm",
kimi: "kimi",
minimax: "minimax",
};
export function getModelsByProviderId(providerId) {
const alias = PROVIDER_ID_TO_ALIAS[providerId] || providerId;
return PROVIDER_MODELS[alias] || [];
}
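For reference, a minimal usage sketch of the helpers above; the import path assumes the open-sse alias configured in jsconfig.json resolves as expected:
// Sketch: querying the model registry (import path assumes the jsconfig "open-sse/*" alias)
import { getProviderModels, getDefaultModel, isValidModel, findModelName } from "open-sse/config/providerModels.js";
const claudeModels = getProviderModels("cc");           // all Claude Code entries
const fallbackModel = getDefaultModel("cc");            // "claude-opus-4-5-20251101" (first entry)
const allowed = isValidModel("gh", "gpt-5");            // true: listed under the gh alias
const label = findModelName("gc", "gemini-2.5-pro");    // "Gemini 2.5 Pro"
console.log({ count: claudeModels.length, fallbackModel, allowed, label });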

View File

@@ -0,0 +1,277 @@
import { detectFormat, getTargetFormat, buildProviderUrl, buildProviderHeaders } from "../services/provider.js";
import { translateRequest, needsTranslation } from "../translator/index.js";
import { FORMATS } from "../translator/formats.js";
import { createSSETransformStreamWithLogger, createPassthroughStreamWithLogger, COLORS } from "../utils/stream.js";
import { createStreamController, pipeWithDisconnect } from "../utils/streamHandler.js";
import { refreshTokenByProvider, refreshWithRetry } from "../services/tokenRefresh.js";
import { createRequestLogger } from "../utils/requestLogger.js";
import { getModelTargetFormat, PROVIDER_ID_TO_ALIAS } from "../config/providerModels.js";
import { createErrorResult, parseUpstreamError, formatProviderError } from "../utils/error.js";
import { handleBypassRequest } from "../utils/bypassHandler.js";
/**
* Core chat handler - shared between SSE and Worker
* Returns { success, response, status, error } for caller to handle fallback
* @param {object} options
* @param {object} options.body - Request body
* @param {object} options.modelInfo - { provider, model }
* @param {object} options.credentials - Provider credentials
* @param {object} options.log - Logger instance (optional)
* @param {function} options.onCredentialsRefreshed - Callback when credentials are refreshed
* @param {function} options.onRequestSuccess - Callback when request succeeds (to clear error status)
* @param {function} options.onDisconnect - Callback when client disconnects
*/
export async function handleChatCore({ body, modelInfo, credentials, log, onCredentialsRefreshed, onRequestSuccess, onDisconnect, clientRawRequest }) {
const { provider, model } = modelInfo;
const sourceFormat = detectFormat(body);
// Check for bypass patterns (warmup, skip) - return fake response
const bypassResponse = handleBypassRequest(body, model);
if (bypassResponse) {
return bypassResponse;
}
// Detect source format and get target format
// Model-specific targetFormat takes priority over provider default
const alias = PROVIDER_ID_TO_ALIAS[provider] || provider;
const modelTargetFormat = getModelTargetFormat(alias, model);
const targetFormat = modelTargetFormat || getTargetFormat(provider);
const stream = body.stream !== false;
// Create request logger for this session: sourceFormat_targetFormat_model
const reqLogger = await createRequestLogger(sourceFormat, targetFormat, model);
// 0. Log client raw request (before any conversion)
if (clientRawRequest) {
reqLogger.logClientRawRequest(
clientRawRequest.endpoint,
clientRawRequest.body,
clientRawRequest.headers
);
}
// 1. Log raw request from client
reqLogger.logRawRequest(body);
// 1a. Log format detection info
reqLogger.logFormatInfo({
sourceFormat,
targetFormat,
provider,
model,
stream
});
log?.debug?.("FORMAT", `${sourceFormat}${targetFormat} | stream=${stream}`);
// Translate request
let translatedBody = body;
translatedBody = translateRequest(sourceFormat, targetFormat, model, body, stream, credentials, provider);
// Update model in body
translatedBody.model = model;
// Build provider URL and headers
const providerUrl = buildProviderUrl(provider, model, stream);
const providerHeaders = buildProviderHeaders(provider, credentials, stream, translatedBody);
// 2. Log converted request to provider
reqLogger.logConvertedRequest(providerUrl, providerHeaders, translatedBody);
const msgCount = translatedBody.messages?.length
|| translatedBody.contents?.length
|| translatedBody.request?.contents?.length
|| 0;
log?.debug?.("REQUEST", `${provider.toUpperCase()} | ${model} | ${msgCount} msgs`);
// Log headers (mask sensitive values)
const safeHeaders = {};
for (const [key, value] of Object.entries(providerHeaders)) {
if (key.toLowerCase().includes("auth") || key.toLowerCase().includes("key") || key.toLowerCase().includes("token")) {
safeHeaders[key] = value ? `${value.slice(0, 10)}...` : "";
} else {
safeHeaders[key] = value;
}
}
log?.debug?.("HEADERS", JSON.stringify(safeHeaders));
// Create stream controller for disconnect detection
const streamController = createStreamController({ onDisconnect, log, provider, model });
// Make request to provider with abort signal
let providerResponse;
try {
providerResponse = await fetch(providerUrl, {
method: "POST",
headers: providerHeaders,
body: JSON.stringify(translatedBody),
signal: streamController.signal
});
} catch (error) {
if (error.name === "AbortError") {
streamController.handleError(error);
return createErrorResult(499, "Request aborted");
}
const errMsg = formatProviderError(error, provider, model);
console.log(`${COLORS.red}[ERROR] ${errMsg}${COLORS.reset}`);
return createErrorResult(502, errMsg);
}
// Handle 401/403 - try token refresh
if (providerResponse.status === 401 || providerResponse.status === 403) {
let newCredentials = null;
// GitHub needs special handling - refresh copilotToken using accessToken
if (provider === "github") {
const { refreshCopilotToken, refreshGitHubToken } = await import("../services/tokenRefresh.js");
// First try refreshing copilotToken using existing accessToken
let copilotResult = await refreshCopilotToken(credentials.accessToken, log);
// If that fails, refresh GitHub accessToken first, then get new copilotToken
if (!copilotResult && credentials.refreshToken) {
const githubTokens = await refreshGitHubToken(credentials.refreshToken, log);
if (githubTokens?.accessToken) {
credentials.accessToken = githubTokens.accessToken;
if (githubTokens.refreshToken) {
credentials.refreshToken = githubTokens.refreshToken;
}
copilotResult = await refreshCopilotToken(githubTokens.accessToken, log);
}
}
if (copilotResult?.token) {
credentials.copilotToken = copilotResult.token;
newCredentials = {
accessToken: credentials.accessToken,
refreshToken: credentials.refreshToken,
providerSpecificData: {
...credentials.providerSpecificData,
copilotToken: copilotResult.token,
copilotTokenExpiresAt: copilotResult.expiresAt
}
};
log?.info?.("TOKEN", `${provider.toUpperCase()} | copilotToken refreshed`);
}
} else {
newCredentials = await refreshWithRetry(
() => refreshTokenByProvider(provider, credentials, log),
3,
log
);
}
if (newCredentials?.accessToken || (provider === "github" && credentials.copilotToken)) {
if (newCredentials?.accessToken) {
log?.info?.("TOKEN", `${provider.toUpperCase()} | refreshed`);
credentials.accessToken = newCredentials.accessToken;
}
if (newCredentials?.refreshToken) {
credentials.refreshToken = newCredentials.refreshToken;
}
if (newCredentials?.providerSpecificData) {
credentials.providerSpecificData = {
...credentials.providerSpecificData,
...newCredentials.providerSpecificData
};
}
// Notify caller about refreshed credentials
if (onCredentialsRefreshed && newCredentials) {
await onCredentialsRefreshed(newCredentials);
}
// Retry with new credentials
const newHeaders = buildProviderHeaders(provider, credentials, stream, translatedBody);
const retryResponse = await fetch(providerUrl, {
method: "POST",
headers: newHeaders,
body: JSON.stringify(translatedBody),
signal: streamController.signal
});
if (retryResponse.ok) {
providerResponse = retryResponse;
}
} else {
log?.warn?.("TOKEN", `${provider.toUpperCase()} | refresh failed`);
}
}
// Check provider response - return error info for fallback handling
if (!providerResponse.ok) {
const { statusCode, message } = await parseUpstreamError(providerResponse);
const errMsg = formatProviderError(new Error(message), provider, model);
console.log(`${COLORS.red}[ERROR] ${errMsg}${COLORS.reset}`);
// Log error with full request body for debugging
reqLogger.logError(new Error(message), translatedBody);
return createErrorResult(statusCode, errMsg);
}
// Non-streaming response
if (!stream) {
const responseBody = await providerResponse.json();
// Notify success - caller can clear error status if needed
if (onRequestSuccess) {
await onRequestSuccess();
}
return {
success: true,
response: new Response(JSON.stringify(responseBody), {
headers: {
"Content-Type": "application/json",
"Access-Control-Allow-Origin": "*"
}
})
};
}
// Streaming response
// Notify success - caller can clear error status if needed
if (onRequestSuccess) {
await onRequestSuccess();
}
const responseHeaders = {
"Content-Type": "text/event-stream",
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"Access-Control-Allow-Origin": "*"
};
// Create transform stream with logger for streaming response
let transformStream;
if (needsTranslation(targetFormat, sourceFormat)) {
transformStream = createSSETransformStreamWithLogger(targetFormat, sourceFormat, provider, reqLogger);
} else {
transformStream = createPassthroughStreamWithLogger(provider, reqLogger);
}
// Pipe response through transform with disconnect detection
const transformedBody = pipeWithDisconnect(providerResponse, transformStream, streamController);
return {
success: true,
response: new Response(transformedBody, {
headers: responseHeaders
})
};
}
/**
* Check if token is expired or about to expire
*/
export function isTokenExpiringSoon(expiresAt, bufferMs = 5 * 60 * 1000) {
if (!expiresAt) return false;
const expiresAtMs = new Date(expiresAt).getTime();
return expiresAtMs - Date.now() < bufferMs;
}
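A minimal caller sketch for the { success, response, status, error } contract above, combined with the account-fallback helpers from services/accountFallback.js; the chatWithFallback wrapper and the account shape are assumptions used only to illustrate the contract:
// Sketch: rotate through accounts on provider errors (account shape is hypothetical)
import { handleChatCore } from "./chatCore.js";
import { checkFallbackError, getUnavailableUntil, filterAvailableAccounts } from "../services/accountFallback.js";
async function chatWithFallback(body, modelInfo, accounts, log) {
let lastError = null;
for (const account of filterAvailableAccounts(accounts)) {
const result = await handleChatCore({
body,
modelInfo,
credentials: account.credentials,
log,
onCredentialsRefreshed: async (creds) => { account.credentials = { ...account.credentials, ...creds }; },
onRequestSuccess: async () => { account.backoffLevel = 0; account.rateLimitedUntil = null; }
});
if (result.success) return result.response;
lastError = result.error;
const { shouldFallback, cooldownMs, newBackoffLevel } = checkFallbackError(result.status, result.error, account.backoffLevel || 0);
if (!shouldFallback) {
return new Response(JSON.stringify({ error: result.error }), { status: result.status });
}
// Put this account on cooldown and try the next one
account.rateLimitedUntil = getUnavailableUntil(cooldownMs);
if (newBackoffLevel !== undefined) account.backoffLevel = newBackoffLevel;
}
return new Response(JSON.stringify({ error: lastError || "No available accounts" }), { status: 503 });
}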

View File

@@ -0,0 +1,69 @@
/**
* Responses API Handler for Workers
* Converts Chat Completions to Codex Responses API format
*/
import { handleChatCore } from "./chatCore.js";
import { convertResponsesApiFormat } from "../translator/helpers/responsesApiHelper.js";
import { createResponsesApiTransformStream } from "../transformer/responsesTransformer.js";
/**
* Handle /v1/responses request
* @param {object} options
* @param {object} options.body - Request body (Responses API format)
* @param {object} options.modelInfo - { provider, model }
* @param {object} options.credentials - Provider credentials
* @param {object} options.log - Logger instance (optional)
* @param {function} options.onCredentialsRefreshed - Callback when credentials are refreshed
* @param {function} options.onRequestSuccess - Callback when request succeeds
* @param {function} options.onDisconnect - Callback when client disconnects
* @returns {Promise<{success: boolean, response?: Response, status?: number, error?: string}>}
*/
export async function handleResponsesCore({ body, modelInfo, credentials, log, onCredentialsRefreshed, onRequestSuccess, onDisconnect }) {
// Convert Responses API format to Chat Completions format
const convertedBody = convertResponsesApiFormat(body);
// Ensure stream is enabled
convertedBody.stream = true;
// Call chat core handler
const result = await handleChatCore({
body: convertedBody,
modelInfo,
credentials,
log,
onCredentialsRefreshed,
onRequestSuccess,
onDisconnect
});
if (!result.success || !result.response) {
return result;
}
const response = result.response;
const contentType = response.headers.get("Content-Type") || "";
// If not SSE or error, return as-is
if (!contentType.includes("text/event-stream") || response.status !== 200) {
return result;
}
// Transform SSE stream to Responses API format (no logging in worker)
const transformStream = createResponsesApiTransformStream(null);
const transformedBody = response.body.pipeThrough(transformStream);
return {
success: true,
response: new Response(transformedBody, {
status: 200,
headers: {
"Content-Type": "text/event-stream",
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"Access-Control-Allow-Origin": "*"
}
})
};
}

66
open-sse/index.js Normal file
View File

@@ -0,0 +1,66 @@
// Config
export { PROVIDERS, OAUTH_ENDPOINTS, CACHE_TTL, DEFAULT_MAX_TOKENS, CLAUDE_SYSTEM_PROMPT, COOLDOWN_MS, BACKOFF_CONFIG } from "./config/constants.js";
export {
PROVIDER_MODELS,
getProviderModels,
getDefaultModel,
isValidModel,
findModelName,
getModelTargetFormat,
PROVIDER_ID_TO_ALIAS,
getModelsByProviderId
} from "./config/providerModels.js";
// Translator
export { FORMATS } from "./translator/formats.js";
export {
register,
translateRequest,
translateResponse,
needsTranslation,
initState,
initTranslators
} from "./translator/index.js";
// Services
export {
detectFormat,
getProviderConfig,
buildProviderUrl,
buildProviderHeaders,
getTargetFormat
} from "./services/provider.js";
export { parseModel, resolveModelAliasFromMap, getModelInfoCore } from "./services/model.js";
export {
checkFallbackError,
isAccountUnavailable,
getUnavailableUntil,
filterAvailableAccounts
} from "./services/accountFallback.js";
export {
TOKEN_EXPIRY_BUFFER_MS,
refreshAccessToken,
refreshClaudeOAuthToken,
refreshGoogleToken,
refreshQwenToken,
refreshCodexToken,
refreshIflowToken,
refreshGitHubToken,
refreshCopilotToken,
getAccessToken,
refreshTokenByProvider
} from "./services/tokenRefresh.js";
// Handlers
export { handleChatCore, isTokenExpiringSoon } from "./handlers/chatCore.js";
export { createStreamController, pipeWithDisconnect, createDisconnectAwareStream } from "./utils/streamHandler.js";
// Utils
export { errorResponse, formatProviderError } from "./utils/error.js";
export {
createSSETransformStreamWithLogger,
createPassthroughStreamWithLogger
} from "./utils/stream.js";

View File

@@ -0,0 +1,148 @@
import { COOLDOWN_MS, BACKOFF_CONFIG } from "../config/constants.js";
/**
* Calculate exponential backoff cooldown for rate limits (429)
* Level 0: 1s, Level 1: 2s, Level 2: 4s... → max 30 min
* @param {number} backoffLevel - Current backoff level
* @returns {number} Cooldown in milliseconds
*/
export function getQuotaCooldown(backoffLevel = 0) {
const cooldown = BACKOFF_CONFIG.base * Math.pow(2, backoffLevel);
return Math.min(cooldown, BACKOFF_CONFIG.max);
}
/**
* Check if error should trigger account fallback (switch to next account)
* @param {number} status - HTTP status code
* @param {string} errorText - Error message text
* @param {number} backoffLevel - Current backoff level for exponential backoff
* @returns {{ shouldFallback: boolean, cooldownMs: number, newBackoffLevel?: number }}
*/
export function checkFallbackError(status, errorText, backoffLevel = 0) {
// Check error message FIRST - specific patterns take priority over status codes
if (errorText) {
const lowerError = errorText.toLowerCase();
// "Request not allowed" - short cooldown (5s), takes priority over status code
if (lowerError.includes("request not allowed")) {
return { shouldFallback: true, cooldownMs: COOLDOWN_MS.requestNotAllowed };
}
// Rate limit keywords - exponential backoff
if (
lowerError.includes("rate limit") ||
lowerError.includes("too many requests") ||
lowerError.includes("quota exceeded") ||
lowerError.includes("capacity") ||
lowerError.includes("overloaded")
) {
const newLevel = Math.min(backoffLevel + 1, BACKOFF_CONFIG.maxLevel);
return {
shouldFallback: true,
cooldownMs: getQuotaCooldown(backoffLevel),
newBackoffLevel: newLevel
};
}
}
// 401 - Authentication error (token expired/invalid)
if (status === 401) {
return { shouldFallback: true, cooldownMs: COOLDOWN_MS.unauthorized };
}
// 402/403 - Payment required / Forbidden (quota/permission)
if (status === 402 || status === 403) {
return { shouldFallback: true, cooldownMs: COOLDOWN_MS.paymentRequired };
}
// 404 - Model not found (long cooldown)
if (status === 404) {
return { shouldFallback: true, cooldownMs: COOLDOWN_MS.notFound };
}
// 429 - Rate limit with exponential backoff
if (status === 429) {
const newLevel = Math.min(backoffLevel + 1, BACKOFF_CONFIG.maxLevel);
return {
shouldFallback: true,
cooldownMs: getQuotaCooldown(backoffLevel),
newBackoffLevel: newLevel
};
}
// 408/500/502/503/504 - Transient errors (short cooldown)
if (status === 408 || status === 500 || status === 502 || status === 503 || status === 504) {
return { shouldFallback: true, cooldownMs: COOLDOWN_MS.transient };
}
return { shouldFallback: false, cooldownMs: 0 };
}
/**
* Check if account is currently unavailable (cooldown not expired)
*/
export function isAccountUnavailable(unavailableUntil) {
if (!unavailableUntil) return false;
return new Date(unavailableUntil).getTime() > Date.now();
}
/**
* Calculate unavailable until timestamp
*/
export function getUnavailableUntil(cooldownMs) {
return new Date(Date.now() + cooldownMs).toISOString();
}
/**
* Filter available accounts (not in cooldown)
*/
export function filterAvailableAccounts(accounts, excludeId = null) {
const now = Date.now();
return accounts.filter(acc => {
if (excludeId && acc.id === excludeId) return false;
if (acc.rateLimitedUntil) {
const until = new Date(acc.rateLimitedUntil).getTime();
if (until > now) return false;
}
return true;
});
}
/**
* Reset account state when request succeeds
* Clears cooldown and resets backoff level to 0
* @param {object} account - Account object
* @returns {object} Updated account with reset state
*/
export function resetAccountState(account) {
if (!account) return account;
return {
...account,
rateLimitedUntil: null,
backoffLevel: 0,
lastError: null,
status: "active"
};
}
/**
* Apply error state to account
* @param {object} account - Account object
* @param {number} status - HTTP status code
* @param {string} errorText - Error message
* @returns {object} Updated account with error state
*/
export function applyErrorState(account, status, errorText) {
if (!account) return account;
const backoffLevel = account.backoffLevel || 0;
const { cooldownMs, newBackoffLevel } = checkFallbackError(status, errorText, backoffLevel);
return {
...account,
rateLimitedUntil: cooldownMs > 0 ? getUnavailableUntil(cooldownMs) : null,
backoffLevel: newBackoffLevel ?? backoffLevel,
lastError: { status, message: errorText, timestamp: new Date().toISOString() },
status: "error"
};
}
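A quick worked example of the cooldown math above, using the BACKOFF_CONFIG and COOLDOWN_MS values from constants.js:
// Sketch: expected cooldowns given the constants in this commit
import { getQuotaCooldown, checkFallbackError } from "./accountFallback.js";
console.log(getQuotaCooldown(0));  // 1000 ms: level 0 starts at the 1s base
console.log(getQuotaCooldown(5));  // 32000 ms: doubled per level
console.log(getQuotaCooldown(15)); // 1800000 ms: capped at the 30-minute max
console.log(checkFallbackError(401, ""));                    // { shouldFallback: true, cooldownMs: 1800000 }
console.log(checkFallbackError(503, ""));                    // { shouldFallback: true, cooldownMs: 30000 }
console.log(checkFallbackError(429, "", 2));                 // { shouldFallback: true, cooldownMs: 4000, newBackoffLevel: 3 }
console.log(checkFallbackError(400, "Request not allowed")); // message match wins: cooldownMs 5000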

View File

@@ -0,0 +1,69 @@
/**
* Shared combo (model combo) handling with fallback support
*/
/**
* Get combo models from combos data
* @param {string} modelStr - Model string to check
* @param {Array|Object} combosData - Array of combos or object with combos
* @returns {string[]|null} Array of models or null if not a combo
*/
export function getComboModelsFromData(modelStr, combosData) {
// Don't check if it's in provider/model format
if (modelStr.includes("/")) return null;
// Handle both array and object formats
const combos = Array.isArray(combosData) ? combosData : (combosData?.combos || []);
const combo = combos.find(c => c.name === modelStr);
if (combo && combo.models && combo.models.length > 0) {
return combo.models;
}
return null;
}
/**
* Handle combo chat with fallback
* @param {Object} options
* @param {Object} options.body - Request body
* @param {string[]} options.models - Array of model strings to try
* @param {Function} options.handleSingleModel - Function to handle single model: (body, modelStr) => Promise<Response>
* @param {Object} options.log - Logger object
* @returns {Promise<Response>}
*/
export async function handleComboChat({ body, models, handleSingleModel, log }) {
let lastError = null;
for (let i = 0; i < models.length; i++) {
const modelStr = models[i];
log.info("COMBO", `Trying model ${i + 1}/${models.length}: ${modelStr}`);
const result = await handleSingleModel(body, modelStr);
// Success (2xx) - return response
if (result.ok) {
return result;
}
// 401 unauthorized - return immediately (auth error)
if (result.status === 401) {
return result;
}
// 4xx/5xx - try next model
lastError = `${modelStr}: ${result.statusText || result.status}`;
log.warn("COMBO", `Model failed, trying next`, { model: modelStr, status: result.status });
}
log.warn("COMBO", "All models failed");
// Return 503 with last error
return new Response(
JSON.stringify({ error: lastError || "All combo models unavailable" }),
{
status: 503,
headers: { "Content-Type": "application/json" }
}
);
}
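An illustrative call site for the combo fallback above; the combos data, the routeChat wrapper, the handleSingleModel stub, and the comboHandler.js filename are assumptions used only to show the contract:
// Sketch: fan a combo name out across provider/model strings (filename assumed)
import { getComboModelsFromData, handleComboChat } from "./comboHandler.js";
const combosData = { combos: [{ name: "fast-code", models: ["cc/claude-haiku-4-5-20251001", "gh/gpt-5-mini"] }] };
// Hypothetical single-model handler: in practice this would resolve credentials and call handleChatCore
async function handleSingleModel(body, modelStr) {
return new Response(JSON.stringify({ model: modelStr }), { status: 200 });
}
async function routeChat(body, log) {
const models = getComboModelsFromData(body.model, combosData);
if (!models) return handleSingleModel(body, body.model); // plain "provider/model" request
return handleComboChat({ body, models, handleSingleModel, log });
}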

View File

@@ -0,0 +1,64 @@
/**
* Shared combo (model combo) handling with fallback support
*/
/**
* Get combo models from combos data
* @param {string} modelStr - Model string to check
* @param {Array|Object} combosData - Array of combos or object with combos
* @returns {string[]|null} Array of models or null if not a combo
*/
export function getComboModelsFromData(modelStr, combosData) {
// Don't check if it's in provider/model format
if (modelStr.includes("/")) return null;
// Handle both array and object formats
const combos = Array.isArray(combosData) ? combosData : (combosData?.combos || []);
const combo = combos.find(c => c.name === modelStr);
if (combo && combo.models && combo.models.length > 0) {
return combo.models;
}
return null;
}
/**
* Handle combo chat with fallback
* @param {Object} options
* @param {Object} options.body - Request body
* @param {string[]} options.models - Array of model strings to try
* @param {Function} options.handleSingleModel - Function to handle single model: (body, modelStr) => Promise<Response>
* @param {Object} options.log - Logger object
* @returns {Promise<Response>}
*/
export async function handleComboChat({ body, models, handleSingleModel, log }) {
let lastError = null;
for (let i = 0; i < models.length; i++) {
const modelStr = models[i];
log.info("COMBO", `Trying model ${i + 1}/${models.length}: ${modelStr}`);
const result = await handleSingleModel(body, modelStr);
// Success or client error - return response
if (result.ok || result.status < 500) {
return result;
}
// 5xx error - try next model
lastError = `${modelStr}: ${result.statusText || result.status}`;
log.warn("COMBO", `Model failed, trying next`, { model: modelStr, status: result.status });
}
log.warn("COMBO", "All models failed");
// Return 503 with last error
return new Response(
JSON.stringify({ error: lastError || "All combo models unavailable" }),
{
status: 503,
headers: { "Content-Type": "application/json" }
}
);
}

109
open-sse/services/model.js Normal file
View File

@@ -0,0 +1,109 @@
// Provider alias to ID mapping
const ALIAS_TO_PROVIDER_ID = {
cc: "claude",
cx: "codex",
gc: "gemini-cli",
qw: "qwen",
if: "iflow",
ag: "antigravity",
gh: "github",
// API Key providers (alias = id)
openai: "openai",
anthropic: "anthropic",
gemini: "gemini",
openrouter: "openrouter",
};
/**
* Resolve provider alias to provider ID
*/
export function resolveProviderAlias(aliasOrId) {
return ALIAS_TO_PROVIDER_ID[aliasOrId] || aliasOrId;
}
/**
* Parse model string: "alias/model" or "provider/model" or just alias
*/
export function parseModel(modelStr) {
if (!modelStr) {
return { provider: null, model: null, isAlias: false, providerAlias: null };
}
// Check if standard format: provider/model or alias/model
if (modelStr.includes("/")) {
const firstSlash = modelStr.indexOf("/");
const providerOrAlias = modelStr.slice(0, firstSlash);
const model = modelStr.slice(firstSlash + 1);
const provider = resolveProviderAlias(providerOrAlias);
return { provider, model, isAlias: false, providerAlias: providerOrAlias };
}
// Alias format (model alias, not provider alias)
return { provider: null, model: modelStr, isAlias: true, providerAlias: null };
}
/**
* Resolve model alias from aliases object
* Format: { "alias": "provider/model" }
*/
export function resolveModelAliasFromMap(alias, aliases) {
if (!aliases) return null;
// Check if alias exists
const resolved = aliases[alias];
if (!resolved) return null;
// Resolved value is "provider/model" format
if (typeof resolved === "string" && resolved.includes("/")) {
const firstSlash = resolved.indexOf("/");
const providerOrAlias = resolved.slice(0, firstSlash);
return {
provider: resolveProviderAlias(providerOrAlias),
model: resolved.slice(firstSlash + 1)
};
}
// Or object { provider, model }
if (typeof resolved === "object" && resolved.provider && resolved.model) {
return {
provider: resolveProviderAlias(resolved.provider),
model: resolved.model
};
}
return null;
}
/**
* Get full model info (parse or resolve)
* @param {string} modelStr - Model string
* @param {object|function} aliasesOrGetter - Aliases object or async function to get aliases
*/
export async function getModelInfoCore(modelStr, aliasesOrGetter) {
const parsed = parseModel(modelStr);
if (!parsed.isAlias) {
return {
provider: parsed.provider,
model: parsed.model
};
}
// Get aliases (from object or function)
const aliases = typeof aliasesOrGetter === "function"
? await aliasesOrGetter()
: aliasesOrGetter;
// Resolve alias
const resolved = resolveModelAliasFromMap(parsed.model, aliases);
if (resolved) {
return resolved;
}
// Fallback: treat as openai model
return {
provider: "openai",
model: parsed.model
};
}
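Illustrative resolutions for the parsing helpers above, assuming a caller-supplied alias map:
// Sketch: how model strings resolve (the alias map is hypothetical)
import { parseModel, resolveModelAliasFromMap, getModelInfoCore } from "./model.js";
console.log(parseModel("cc/claude-opus-4-5-20251101"));
// { provider: "claude", model: "claude-opus-4-5-20251101", isAlias: false, providerAlias: "cc" }
console.log(parseModel("fast"));
// { provider: null, model: "fast", isAlias: true, providerAlias: null }
const aliases = { fast: "gh/gpt-5-mini" }; // alias → "provider/model"
console.log(resolveModelAliasFromMap("fast", aliases));
// { provider: "github", model: "gpt-5-mini" }
console.log(await getModelInfoCore("unknown-model", aliases));
// { provider: "openai", model: "unknown-model" } (fallback when no alias matches)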

View File

@@ -0,0 +1,237 @@
import { PROVIDERS } from "../config/constants.js";
// Detect request format from body structure
export function detectFormat(body) {
// OpenAI Responses API: has input[] array instead of messages[]
if (body.input && Array.isArray(body.input)) {
return "openai-responses";
}
// Gemini format: has contents array
if (body.contents && Array.isArray(body.contents)) {
return "gemini";
}
// OpenAI-specific indicators (check BEFORE Claude)
// These fields are OpenAI-specific and never appear in Claude format
if (
body.stream_options || // OpenAI streaming options
body.response_format || // JSON mode, etc.
body.logprobs !== undefined || // Log probabilities
body.top_logprobs !== undefined ||
body.n !== undefined || // Number of completions
body.presence_penalty !== undefined || // Penalties
body.frequency_penalty !== undefined ||
body.logit_bias || // Token biasing
body.user // User identifier
) {
return "openai";
}
// Claude format: messages with content as array of objects with type
// Claude requires content to be array with specific structure
if (body.messages && Array.isArray(body.messages)) {
const firstMsg = body.messages[0];
// If content is array, check if it follows Claude structure
if (firstMsg?.content && Array.isArray(firstMsg.content)) {
const firstContent = firstMsg.content[0];
// Claude format has specific types: text, image, tool_use, tool_result
// OpenAI multimodal has: text, image_url (note the difference)
if (firstContent?.type === "text" && !body.model?.includes("/")) {
// Could be Claude or OpenAI multimodal
// Check for Claude-specific fields
if (body.system || body.anthropic_version) {
return "claude";
}
// Check if image format is Claude (source.type) vs OpenAI (image_url.url)
const hasClaudeImage = firstMsg.content.some(c =>
c.type === "image" && c.source?.type === "base64"
);
const hasOpenAIImage = firstMsg.content.some(c =>
c.type === "image_url" && c.image_url?.url
);
if (hasClaudeImage) return "claude";
if (hasOpenAIImage) return "openai";
// If still unclear, check for tool format
const hasClaudeTool = firstMsg.content.some(c =>
c.type === "tool_use" || c.type === "tool_result"
);
if (hasClaudeTool) return "claude";
}
}
// If content is string, it's likely OpenAI (Claude also supports this)
// Check for other Claude-specific indicators
if (body.system !== undefined || body.anthropic_version) {
return "claude";
}
}
// Default to OpenAI format
return "openai";
}
// Get provider config
export function getProviderConfig(provider) {
return PROVIDERS[provider] || PROVIDERS.openai;
}
// Build provider URL
export function buildProviderUrl(provider, model, stream = true) {
const config = getProviderConfig(provider);
switch (provider) {
case "claude":
return `${config.baseUrl}?beta=true`;
case "gemini": {
const action = stream ? "streamGenerateContent?alt=sse" : "generateContent";
return `${config.baseUrl}/${model}:${action}`;
}
case "gemini-cli": {
const action = stream ? "streamGenerateContent?alt=sse" : "generateContent";
return `${config.baseUrl}:${action}`;
}
case "antigravity": {
const baseUrl = config.baseUrls[0];
const path = stream ? "/v1internal:streamGenerateContent?alt=sse" : "/v1internal:generateContent";
return `${baseUrl}${path}`;
}
case "codex":
return config.baseUrl;
case "github":
return config.baseUrl;
case "glm":
case "kimi":
case "minimax":
// Claude-compatible providers
return `${config.baseUrl}?beta=true`;
default:
return config.baseUrl;
}
}
// Build provider headers
export function buildProviderHeaders(provider, credentials, stream = true, body = null) {
const config = getProviderConfig(provider);
const headers = {
"Content-Type": "application/json",
...config.headers
};
// Add auth header
switch (provider) {
case "gemini":
if (credentials.apiKey) {
headers["x-goog-api-key"] = credentials.apiKey;
} else if (credentials.accessToken) {
headers["Authorization"] = `Bearer ${credentials.accessToken}`;
}
break;
case "antigravity":
case "gemini-cli":
// Antigravity and Gemini CLI use OAuth access token
headers["Authorization"] = `Bearer ${credentials.accessToken}`;
break;
case "claude":
// Claude uses x-api-key header for API key, or Authorization for OAuth
if (credentials.apiKey) {
headers["x-api-key"] = credentials.apiKey;
} else if (credentials.accessToken) {
headers["Authorization"] = `Bearer ${credentials.accessToken}`;
}
break;
case "github":
// GitHub Copilot requires special headers to mimic VSCode
// Prioritize copilotToken from providerSpecificData, fallback to accessToken
const githubToken = credentials.copilotToken || credentials.accessToken;
// Add headers in exact same order as test endpoint
headers["Authorization"] = `Bearer ${githubToken}`;
headers["Content-Type"] = "application/json";
headers["copilot-integration-id"] = "vscode-chat";
headers["editor-version"] = "vscode/1.107.1";
headers["editor-plugin-version"] = "copilot-chat/0.26.7";
headers["user-agent"] = "GitHubCopilotChat/0.26.7";
headers["openai-intent"] = "conversation-panel";
headers["x-github-api-version"] = "2025-04-01";
// Generate a UUID for x-request-id (Cloudflare Workers compatible)
headers["x-request-id"] = crypto.randomUUID ? crypto.randomUUID() :
'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function(c) {
const r = Math.random() * 16 | 0;
const v = c == 'x' ? r : (r & 0x3 | 0x8);
return v.toString(16);
});
headers["x-vscode-user-agent-library-version"] = "electron-fetch";
headers["X-Initiator"] = "user";
headers["Accept"] = "application/json";
break;
case "codex":
case "qwen":
case "openai":
case "openrouter":
headers["Authorization"] = `Bearer ${credentials.apiKey || credentials.accessToken}`;
break;
case "glm":
case "kimi":
case "minimax":
// Claude-compatible API providers use x-api-key
headers["x-api-key"] = credentials.apiKey;
break;
default:
headers["Authorization"] = `Bearer ${credentials.apiKey || credentials.accessToken}`;
break;
}
// Stream accept header
if (stream) {
headers["Accept"] = "text/event-stream";
}
return headers;
}
// Get target format for provider
export function getTargetFormat(provider) {
const config = getProviderConfig(provider);
return config.format || "openai";
}
// Check if last message is from user
export function isLastMessageFromUser(body) {
const messages = body.messages || body.contents;
if (!messages?.length) return true;
const lastMsg = messages[messages.length - 1];
return lastMsg?.role === "user";
}
// Check if request has thinking config
export function hasThinkingConfig(body) {
return !!(body.reasoning_effort || body.thinking?.type === "enabled");
}
// Normalize thinking config based on last message role
// - If lastMessage is not user → remove thinking config
// - If lastMessage is user AND has thinking config → keep it (force enable)
export function normalizeThinkingConfig(body) {
if (!isLastMessageFromUser(body)) {
delete body.reasoning_effort;
delete body.thinking;
}
return body;
}
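A few illustrative calls showing what the detection and builder functions above produce; the sample request bodies are made up for the example:
// Sketch: format detection and URL/header building
import { detectFormat, buildProviderUrl, buildProviderHeaders } from "./provider.js";
console.log(detectFormat({ input: [{ role: "user", content: "hi" }] }));                        // "openai-responses"
console.log(detectFormat({ contents: [{ role: "user", parts: [{ text: "hi" }] }] }));           // "gemini"
console.log(detectFormat({ system: "Be terse", messages: [{ role: "user", content: "hi" }] })); // "claude"
console.log(detectFormat({ messages: [{ role: "user", content: "hi" }] }));                     // "openai" (default)
console.log(buildProviderUrl("claude", "claude-sonnet-4-5-20250929", true));
// "https://api.anthropic.com/v1/messages?beta=true"
console.log(buildProviderUrl("gemini", "gemini-2.5-flash", true));
// "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:streamGenerateContent?alt=sse"
const headers = buildProviderHeaders("claude", { apiKey: "sk-example" }, false);
console.log(headers["x-api-key"]); // "sk-example" (an OAuth accessToken would go to Authorization instead)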

View File

@@ -0,0 +1,542 @@
import { PROVIDERS, OAUTH_ENDPOINTS } from "../config/constants.js";
// Token expiry buffer (refresh if expires within 5 minutes)
export const TOKEN_EXPIRY_BUFFER_MS = 5 * 60 * 1000;
/**
* Refresh OAuth access token using refresh token
*/
export async function refreshAccessToken(provider, refreshToken, credentials, log) {
const config = PROVIDERS[provider];
// PROVIDERS entries define tokenUrl rather than refreshUrl, so accept either key
const refreshUrl = config?.refreshUrl || config?.tokenUrl;
if (!config || !refreshUrl) {
log?.warn?.("TOKEN_REFRESH", `No refresh URL configured for provider: ${provider}`);
return null;
}
if (!refreshToken) {
log?.warn?.("TOKEN_REFRESH", `No refresh token available for provider: ${provider}`);
return null;
}
try {
const response = await fetch(refreshUrl, {
method: "POST",
headers: {
"Content-Type": "application/x-www-form-urlencoded",
Accept: "application/json",
},
body: new URLSearchParams({
grant_type: "refresh_token",
refresh_token: refreshToken,
client_id: config.clientId,
client_secret: config.clientSecret,
}),
});
if (!response.ok) {
const errorText = await response.text();
log?.error?.("TOKEN_REFRESH", `Failed to refresh token for ${provider}`, {
status: response.status,
error: errorText,
});
return null;
}
const tokens = await response.json();
log?.info?.("TOKEN_REFRESH", `Successfully refreshed token for ${provider}`, {
hasNewAccessToken: !!tokens.access_token,
hasNewRefreshToken: !!tokens.refresh_token,
expiresIn: tokens.expires_in,
});
return {
accessToken: tokens.access_token,
refreshToken: tokens.refresh_token || refreshToken,
expiresIn: tokens.expires_in,
};
} catch (error) {
log?.error?.("TOKEN_REFRESH", `Error refreshing token for ${provider}`, {
error: error.message,
});
return null;
}
}
/**
* Specialized refresh for Claude OAuth tokens
*/
export async function refreshClaudeOAuthToken(refreshToken, log) {
const response = await fetch(OAUTH_ENDPOINTS.anthropic.token, {
method: "POST",
headers: {
"Content-Type": "application/json",
Accept: "application/json",
},
body: JSON.stringify({
grant_type: "refresh_token",
refresh_token: refreshToken,
client_id: PROVIDERS.claude.clientId,
}),
});
if (!response.ok) {
const errorText = await response.text();
log?.error?.("TOKEN_REFRESH", "Failed to refresh Claude OAuth token", {
status: response.status,
error: errorText,
});
return null;
}
const tokens = await response.json();
log?.info?.("TOKEN_REFRESH", "Successfully refreshed Claude OAuth token", {
hasNewAccessToken: !!tokens.access_token,
hasNewRefreshToken: !!tokens.refresh_token,
expiresIn: tokens.expires_in,
});
return {
accessToken: tokens.access_token,
refreshToken: tokens.refresh_token || refreshToken,
expiresIn: tokens.expires_in,
};
}
/**
* Specialized refresh for Google providers (Gemini, Antigravity)
*/
export async function refreshGoogleToken(refreshToken, clientId, clientSecret, log) {
const response = await fetch(OAUTH_ENDPOINTS.google.token, {
method: "POST",
headers: {
"Content-Type": "application/x-www-form-urlencoded",
Accept: "application/json",
},
body: new URLSearchParams({
grant_type: "refresh_token",
refresh_token: refreshToken,
client_id: clientId,
client_secret: clientSecret,
}),
});
if (!response.ok) {
const errorText = await response.text();
log?.error?.("TOKEN_REFRESH", "Failed to refresh Google token", {
status: response.status,
error: errorText,
});
return null;
}
const tokens = await response.json();
log?.info?.("TOKEN_REFRESH", "Successfully refreshed Google token", {
hasNewAccessToken: !!tokens.access_token,
hasNewRefreshToken: !!tokens.refresh_token,
expiresIn: tokens.expires_in,
});
return {
accessToken: tokens.access_token,
refreshToken: tokens.refresh_token || refreshToken,
expiresIn: tokens.expires_in,
};
}
/**
* Specialized refresh for Qwen OAuth tokens
*/
export async function refreshQwenToken(refreshToken, log) {
const endpoint = OAUTH_ENDPOINTS.qwen.token;
try {
const response = await fetch(endpoint, {
method: "POST",
headers: {
"Content-Type": "application/x-www-form-urlencoded",
Accept: "application/json",
},
body: new URLSearchParams({
grant_type: "refresh_token",
refresh_token: refreshToken,
client_id: PROVIDERS.qwen.clientId,
}),
});
if (response.status === 200) {
const tokens = await response.json();
log?.info?.("TOKEN_REFRESH", "Successfully refreshed Qwen token", {
hasNewAccessToken: !!tokens.access_token,
hasNewRefreshToken: !!tokens.refresh_token,
expiresIn: tokens.expires_in,
});
return {
accessToken: tokens.access_token,
refreshToken: tokens.refresh_token || refreshToken,
expiresIn: tokens.expires_in,
};
} else {
const errorText = await response.text().catch(() => "");
log?.warn?.("TOKEN_REFRESH", `Error with Qwen endpoint`, {
status: response.status,
error: errorText,
});
}
} catch (error) {
log?.warn?.("TOKEN_REFRESH", `Network error trying Qwen endpoint`, {
error: error.message,
});
}
log?.error?.("TOKEN_REFRESH", "Failed to refresh Qwen token");
return null;
}
/**
* Specialized refresh for Codex (OpenAI) OAuth tokens
*/
export async function refreshCodexToken(refreshToken, log) {
const response = await fetch(OAUTH_ENDPOINTS.openai.token, {
method: "POST",
headers: {
"Content-Type": "application/x-www-form-urlencoded",
Accept: "application/json",
},
body: new URLSearchParams({
grant_type: "refresh_token",
refresh_token: refreshToken,
client_id: PROVIDERS.codex.clientId,
scope: "openid profile email offline_access",
}),
});
if (!response.ok) {
const errorText = await response.text();
log?.error?.("TOKEN_REFRESH", "Failed to refresh Codex token", {
status: response.status,
error: errorText,
});
return null;
}
const tokens = await response.json();
log?.info?.("TOKEN_REFRESH", "Successfully refreshed Codex token", {
hasNewAccessToken: !!tokens.access_token,
hasNewRefreshToken: !!tokens.refresh_token,
expiresIn: tokens.expires_in,
});
return {
accessToken: tokens.access_token,
refreshToken: tokens.refresh_token || refreshToken,
expiresIn: tokens.expires_in,
};
}
/**
* Specialized refresh for iFlow OAuth tokens
*/
export async function refreshIflowToken(refreshToken, log) {
const basicAuth = btoa(`${PROVIDERS.iflow.clientId}:${PROVIDERS.iflow.clientSecret}`);
const response = await fetch(OAUTH_ENDPOINTS.iflow.token, {
method: "POST",
headers: {
"Content-Type": "application/x-www-form-urlencoded",
Accept: "application/json",
Authorization: `Basic ${basicAuth}`,
},
body: new URLSearchParams({
grant_type: "refresh_token",
refresh_token: refreshToken,
client_id: PROVIDERS.iflow.clientId,
client_secret: PROVIDERS.iflow.clientSecret,
}),
});
if (!response.ok) {
const errorText = await response.text();
log?.error?.("TOKEN_REFRESH", "Failed to refresh iFlow token", {
status: response.status,
error: errorText,
});
return null;
}
const tokens = await response.json();
log?.info?.("TOKEN_REFRESH", "Successfully refreshed iFlow token", {
hasNewAccessToken: !!tokens.access_token,
hasNewRefreshToken: !!tokens.refresh_token,
expiresIn: tokens.expires_in,
});
return {
accessToken: tokens.access_token,
refreshToken: tokens.refresh_token || refreshToken,
expiresIn: tokens.expires_in,
};
}
/**
* Specialized refresh for GitHub Copilot OAuth tokens
*/
export async function refreshGitHubToken(refreshToken, log) {
const response = await fetch(OAUTH_ENDPOINTS.github.token, {
method: "POST",
headers: {
"Content-Type": "application/x-www-form-urlencoded",
Accept: "application/json",
},
body: new URLSearchParams({
grant_type: "refresh_token",
refresh_token: refreshToken,
client_id: PROVIDERS.github.clientId,
client_secret: PROVIDERS.github.clientSecret,
}),
});
if (!response.ok) {
const errorText = await response.text();
log?.error?.("TOKEN_REFRESH", "Failed to refresh GitHub token", {
status: response.status,
error: errorText,
});
return null;
}
const tokens = await response.json();
log?.info?.("TOKEN_REFRESH", "Successfully refreshed GitHub token", {
hasNewAccessToken: !!tokens.access_token,
hasNewRefreshToken: !!tokens.refresh_token,
expiresIn: tokens.expires_in,
});
return {
accessToken: tokens.access_token,
refreshToken: tokens.refresh_token || refreshToken,
expiresIn: tokens.expires_in,
};
}
/**
* Refresh GitHub Copilot token using GitHub access token
*/
export async function refreshCopilotToken(githubAccessToken, log) {
try {
const response = await fetch("https://api.github.com/copilot_internal/v2/token", {
headers: {
"Authorization": `Bearer ${githubAccessToken}`,
"User-Agent": "GitHub-Copilot/1.0",
"Accept": "*/*"
}
});
if (!response.ok) {
const errorText = await response.text();
log?.error?.("TOKEN_REFRESH", "Failed to refresh Copilot token", {
status: response.status,
error: errorText
});
return null;
}
const data = await response.json();
log?.info?.("TOKEN_REFRESH", "Successfully refreshed Copilot token", {
hasToken: !!data.token,
expiresAt: data.expires_at
});
return {
token: data.token,
expiresAt: data.expires_at
};
} catch (error) {
log?.error?.("TOKEN_REFRESH", "Error refreshing Copilot token", {
error: error.message
});
return null;
}
}
/**
* Get access token for a specific provider
*/
export async function getAccessToken(provider, credentials, log) {
if (!credentials || !credentials.refreshToken) {
log?.warn?.("TOKEN_REFRESH", `No refresh token available for provider: ${provider}`);
return null;
}
switch (provider) {
case "gemini":
case "gemini-cli":
case "antigravity":
return await refreshGoogleToken(
credentials.refreshToken,
PROVIDERS[provider].clientId,
PROVIDERS[provider].clientSecret,
log
);
case "claude":
return await refreshClaudeOAuthToken(credentials.refreshToken, log);
case "codex":
return await refreshCodexToken(credentials.refreshToken, log);
case "qwen":
return await refreshQwenToken(credentials.refreshToken, log);
case "iflow":
return await refreshIflowToken(credentials.refreshToken, log);
case "github":
return await refreshGitHubToken(credentials.refreshToken, log);
default:
log?.warn?.("TOKEN_REFRESH", `Unsupported provider for token refresh: ${provider}`);
return null;
}
}
/**
* Refresh token by provider type (helper for handlers)
*/
export async function refreshTokenByProvider(provider, credentials, log) {
if (!credentials.refreshToken) return null;
switch (provider) {
case "gemini-cli":
case "antigravity":
return refreshGoogleToken(
credentials.refreshToken,
PROVIDERS[provider].clientId,
PROVIDERS[provider].clientSecret,
log
);
case "claude":
return refreshClaudeOAuthToken(credentials.refreshToken, log);
case "codex":
return refreshCodexToken(credentials.refreshToken, log);
case "qwen":
return refreshQwenToken(credentials.refreshToken, log);
case "iflow":
return refreshIflowToken(credentials.refreshToken, log);
case "github":
return refreshGitHubToken(credentials.refreshToken, log);
default:
return refreshAccessToken(provider, credentials.refreshToken, credentials, log);
}
}
/**
* Format credentials for provider
*/
export function formatProviderCredentials(provider, credentials, log) {
const config = PROVIDERS[provider];
if (!config) {
log?.warn?.("TOKEN_REFRESH", `No configuration found for provider: ${provider}`);
return null;
}
switch (provider) {
case "gemini":
return {
apiKey: credentials.apiKey,
accessToken: credentials.accessToken,
projectId: credentials.projectId
};
case "claude":
return {
apiKey: credentials.apiKey,
accessToken: credentials.accessToken
};
case "codex":
case "qwen":
case "iflow":
case "openai":
case "openrouter":
return {
apiKey: credentials.apiKey,
accessToken: credentials.accessToken
};
case "antigravity":
case "gemini-cli":
return {
accessToken: credentials.accessToken,
refreshToken: credentials.refreshToken
};
default:
return {
apiKey: credentials.apiKey,
accessToken: credentials.accessToken,
refreshToken: credentials.refreshToken
};
}
}
/**
* Get all access tokens for a user
*/
export async function getAllAccessTokens(userInfo, log) {
const results = {};
if (userInfo.connections && Array.isArray(userInfo.connections)) {
for (const connection of userInfo.connections) {
if (connection.isActive && connection.provider) {
const token = await getAccessToken(connection.provider, {
refreshToken: connection.refreshToken
}, log);
if (token) {
results[connection.provider] = token;
}
}
}
}
return results;
}
/**
 * Refresh token with retry and linearly increasing backoff
* Retries on failure with increasing delay: 1s, 2s, 3s...
* @param {function} refreshFn - Async function that returns token or null
* @param {number} maxRetries - Max retry attempts (default 3)
* @param {object} log - Logger instance (optional)
* @returns {Promise<object|null>} Token result or null if all retries fail
*/
export async function refreshWithRetry(refreshFn, maxRetries = 3, log = null) {
for (let attempt = 0; attempt < maxRetries; attempt++) {
if (attempt > 0) {
const delay = attempt * 1000;
log?.debug?.("TOKEN_REFRESH", `Retry ${attempt}/${maxRetries} after ${delay}ms`);
await new Promise(r => setTimeout(r, delay));
}
try {
const result = await refreshFn();
if (result) return result;
} catch (error) {
log?.warn?.("TOKEN_REFRESH", `Attempt ${attempt + 1}/${maxRetries} failed: ${error.message}`);
}
}
log?.error?.("TOKEN_REFRESH", `All ${maxRetries} retry attempts failed`);
return null;
}
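// Illustrative sketch (not part of the original module): wrapping a provider refresh in
// refreshWithRetry. The provider name, refresh token, and logger are placeholders; the result
// shape is assumed to mirror the refreshers above.
//
//   const refreshed = await refreshWithRetry(
//     () => refreshTokenByProvider("claude", { refreshToken: "sk-ant-ort01-..." }, console),
//     3,
//     console
//   );
//   // refreshed -> { accessToken, refreshToken, expiresIn } on success, or null after 3 failed attempts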

398
open-sse/services/usage.js Normal file
View File

@@ -0,0 +1,398 @@
/**
* Usage Fetcher - Get usage data from provider APIs
*/
// GitHub API config
const GITHUB_CONFIG = {
apiVersion: "2022-11-28",
userAgent: "GitHubCopilotChat/0.26.7",
};
// Antigravity API config (from Quotio)
const ANTIGRAVITY_CONFIG = {
quotaApiUrl: "https://cloudcode-pa.googleapis.com/v1internal:fetchAvailableModels",
loadProjectApiUrl: "https://cloudcode-pa.googleapis.com/v1internal:loadCodeAssist",
tokenUrl: "https://oauth2.googleapis.com/token",
clientId: "1071006060591-tmhssin2h21lcre235vtolojh4g403ep.apps.googleusercontent.com",
clientSecret: "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",
userAgent: "antigravity/1.11.3 Darwin/arm64",
};
// Codex (OpenAI) API config
const CODEX_CONFIG = {
usageUrl: "https://chatgpt.com/backend-api/wham/usage",
};
// Claude API config
const CLAUDE_CONFIG = {
usageUrl: "https://api.anthropic.com/v1/organizations/{org_id}/usage",
settingsUrl: "https://api.anthropic.com/v1/settings",
};
/**
* Get usage data for a provider connection
* @param {Object} connection - Provider connection with accessToken
* @returns {Object} Usage data with quotas
*/
export async function getUsageForProvider(connection) {
const { provider, accessToken, providerSpecificData } = connection;
switch (provider) {
case "github":
return await getGitHubUsage(accessToken, providerSpecificData);
case "gemini-cli":
return await getGeminiUsage(accessToken);
case "antigravity":
return await getAntigravityUsage(accessToken);
case "claude":
return await getClaudeUsage(accessToken);
case "codex":
return await getCodexUsage(accessToken);
case "qwen":
return await getQwenUsage(accessToken, providerSpecificData);
case "iflow":
return await getIflowUsage(accessToken);
default:
return { message: `Usage API not implemented for ${provider}` };
}
}
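// Illustrative sketch (not part of the original module): the connection shape expected by
// getUsageForProvider. The accessToken and providerSpecificData values are placeholders.
//
//   const usage = await getUsageForProvider({
//     provider: "github",
//     accessToken: "ghu_...",
//     providerSpecificData: {}
//   });
//   // -> { plan, resetDate, quotas: { chat, completions, premium_interactions } } on a paid plan,
//   //    or { message: "..." } when quota data cannot be parsed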
/**
* GitHub Copilot Usage
*/
async function getGitHubUsage(accessToken, providerSpecificData) {
try {
const response = await fetch("https://api.github.com/copilot_internal/user", {
headers: {
"Authorization": `Bearer ${accessToken}`,
"Accept": "application/json",
"X-GitHub-Api-Version": GITHUB_CONFIG.apiVersion,
"User-Agent": GITHUB_CONFIG.userAgent,
},
});
if (!response.ok) {
const error = await response.text();
throw new Error(`GitHub API error: ${error}`);
}
const data = await response.json();
// Handle different response formats (paid vs free)
if (data.quota_snapshots) {
// Paid plan format
const snapshots = data.quota_snapshots;
return {
plan: data.copilot_plan,
resetDate: data.quota_reset_date,
quotas: {
chat: formatGitHubQuotaSnapshot(snapshots.chat),
completions: formatGitHubQuotaSnapshot(snapshots.completions),
premium_interactions: formatGitHubQuotaSnapshot(snapshots.premium_interactions),
},
};
} else if (data.monthly_quotas || data.limited_user_quotas) {
// Free/limited plan format
const monthlyQuotas = data.monthly_quotas || {};
const usedQuotas = data.limited_user_quotas || {};
return {
plan: data.copilot_plan || data.access_type_sku,
resetDate: data.limited_user_reset_date,
quotas: {
chat: {
used: usedQuotas.chat || 0,
total: monthlyQuotas.chat || 0,
unlimited: false,
},
completions: {
used: usedQuotas.completions || 0,
total: monthlyQuotas.completions || 0,
unlimited: false,
},
},
};
}
return { message: "GitHub Copilot connected. Unable to parse quota data." };
} catch (error) {
throw new Error(`Failed to fetch GitHub usage: ${error.message}`);
}
}
function formatGitHubQuotaSnapshot(quota) {
if (!quota) return { used: 0, total: 0, unlimited: true };
return {
used: quota.entitlement - quota.remaining,
total: quota.entitlement,
remaining: quota.remaining,
unlimited: quota.unlimited || false,
};
}
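// Worked example (illustrative values): a snapshot of { entitlement: 300, remaining: 280,
// unlimited: false } formats to { used: 20, total: 300, remaining: 280, unlimited: false };
// a missing snapshot formats to { used: 0, total: 0, unlimited: true }.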
/**
* Gemini CLI Usage (Google Cloud)
*/
async function getGeminiUsage(accessToken) {
try {
// Gemini CLI uses Google Cloud quotas
// Try to get quota info from Cloud Resource Manager
const response = await fetch(
"https://cloudresourcemanager.googleapis.com/v1/projects?filter=lifecycleState:ACTIVE",
{
headers: {
Authorization: `Bearer ${accessToken}`,
Accept: "application/json",
},
}
);
if (!response.ok) {
// Quota API may not be accessible, return generic message
return { message: "Gemini CLI uses Google Cloud quotas. Check Google Cloud Console for details." };
}
return { message: "Gemini CLI connected. Usage tracked via Google Cloud Console." };
} catch (error) {
return { message: "Unable to fetch Gemini usage. Check Google Cloud Console." };
}
}
/**
* Antigravity Usage - Fetch quota from Google Cloud Code API
*/
async function getAntigravityUsage(accessToken, providerSpecificData) {
try {
// First get project ID from subscription info
const projectId = await getAntigravityProjectId(accessToken);
// Fetch quota data
const response = await fetch(ANTIGRAVITY_CONFIG.quotaApiUrl, {
method: "POST",
headers: {
"Authorization": `Bearer ${accessToken}`,
"User-Agent": ANTIGRAVITY_CONFIG.userAgent,
"Content-Type": "application/json",
},
body: JSON.stringify(projectId ? { project: projectId } : {}),
});
if (response.status === 403) {
return { message: "Antigravity access forbidden. Check subscription." };
}
if (!response.ok) {
throw new Error(`Antigravity API error: ${response.status}`);
}
const data = await response.json();
const quotas = {};
// Parse model quotas
if (data.models) {
for (const [name, info] of Object.entries(data.models)) {
// Only include gemini and claude models
if (!name.includes("gemini") && !name.includes("claude")) continue;
if (info.quotaInfo) {
const percentage = (info.quotaInfo.remainingFraction || 0) * 100;
quotas[name] = {
remaining: percentage,
resetTime: info.quotaInfo.resetTime || "",
unlimited: false,
};
}
}
}
// Get subscription info for plan type
const subscriptionInfo = await getAntigravitySubscriptionInfo(accessToken);
return {
plan: subscriptionInfo?.currentTier?.name || "Unknown",
quotas,
subscriptionInfo,
};
} catch (error) {
return { message: `Antigravity error: ${error.message}` };
}
}
/**
* Get Antigravity project ID from subscription info
*/
async function getAntigravityProjectId(accessToken) {
try {
const info = await getAntigravitySubscriptionInfo(accessToken);
return info?.cloudaicompanionProject || null;
} catch {
return null;
}
}
/**
* Get Antigravity subscription info
*/
async function getAntigravitySubscriptionInfo(accessToken) {
try {
const response = await fetch(ANTIGRAVITY_CONFIG.loadProjectApiUrl, {
method: "POST",
headers: {
"Authorization": `Bearer ${accessToken}`,
"User-Agent": ANTIGRAVITY_CONFIG.userAgent,
"Content-Type": "application/json",
},
body: JSON.stringify({ metadata: { ideType: "ANTIGRAVITY" } }),
});
if (!response.ok) return null;
return await response.json();
} catch {
return null;
}
}
/**
* Claude Usage - Try to fetch from Anthropic API
*/
async function getClaudeUsage(accessToken) {
try {
// Try to get organization/account settings first
const settingsResponse = await fetch("https://api.anthropic.com/v1/settings", {
method: "GET",
headers: {
"Authorization": `Bearer ${accessToken}`,
"Content-Type": "application/json",
"anthropic-version": "2023-06-01",
},
});
if (settingsResponse.ok) {
const settings = await settingsResponse.json();
// Try usage endpoint if we have org info
if (settings.organization_id) {
const usageResponse = await fetch(
`https://api.anthropic.com/v1/organizations/${settings.organization_id}/usage`,
{
method: "GET",
headers: {
"Authorization": `Bearer ${accessToken}`,
"Content-Type": "application/json",
"anthropic-version": "2023-06-01",
},
}
);
if (usageResponse.ok) {
const usage = await usageResponse.json();
return {
plan: settings.plan || "Unknown",
organization: settings.organization_name,
quotas: usage,
};
}
}
return {
plan: settings.plan || "Unknown",
organization: settings.organization_name,
message: "Claude connected. Usage details require admin access.",
};
}
// If settings API fails, OAuth token may not have required scope
return { message: "Claude connected. Usage API requires admin permissions." };
} catch (error) {
return { message: `Claude connected. Unable to fetch usage: ${error.message}` };
}
}
/**
* Codex (OpenAI) Usage - Fetch from ChatGPT backend API
*/
async function getCodexUsage(accessToken) {
try {
const response = await fetch(CODEX_CONFIG.usageUrl, {
method: "GET",
headers: {
"Authorization": `Bearer ${accessToken}`,
"Accept": "application/json",
},
});
if (!response.ok) {
throw new Error(`Codex API error: ${response.status}`);
}
const data = await response.json();
// Parse rate limit info
const rateLimit = data.rate_limit || {};
const primaryWindow = rateLimit.primary_window || {};
const secondaryWindow = rateLimit.secondary_window || {};
// Calculate reset dates
const sessionResetAt = primaryWindow.reset_at
? new Date(primaryWindow.reset_at * 1000).toISOString()
: null;
const weeklyResetAt = secondaryWindow.reset_at
? new Date(secondaryWindow.reset_at * 1000).toISOString()
: null;
return {
plan: data.plan_type || "unknown",
limitReached: rateLimit.limit_reached || false,
quotas: {
session: {
used: primaryWindow.used_percent || 0,
total: 100,
remaining: 100 - (primaryWindow.used_percent || 0),
resetTime: sessionResetAt,
unlimited: false,
},
weekly: {
used: secondaryWindow.used_percent || 0,
total: 100,
remaining: 100 - (secondaryWindow.used_percent || 0),
resetTime: weeklyResetAt,
unlimited: false,
},
},
};
} catch (error) {
throw new Error(`Failed to fetch Codex usage: ${error.message}`);
}
}
/**
* Qwen Usage
*/
async function getQwenUsage(accessToken, providerSpecificData) {
try {
const resourceUrl = providerSpecificData?.resourceUrl;
if (!resourceUrl) {
return { message: "Qwen connected. No resource URL available." };
}
// Qwen may have usage endpoint at resource URL
return { message: "Qwen connected. Usage tracked per request." };
} catch (error) {
return { message: "Unable to fetch Qwen usage." };
}
}
/**
* iFlow Usage
*/
async function getIflowUsage(accessToken) {
try {
// iFlow may have usage endpoint
return { message: "iFlow connected. Usage tracked per request." };
} catch (error) {
return { message: "Unable to fetch iFlow usage." };
}
}

View File

@@ -0,0 +1,439 @@
/**
* Responses API Transformer
* Converts OpenAI Chat Completions SSE to Codex Responses API SSE format
* Can be used in both Next.js and Cloudflare Workers
*/
import fs from "fs";
import path from "path";
// Create log directory for responses (Node.js only)
export function createResponsesLogger(model, logsDir = null) {
// Skip logging in worker environment (no fs)
if (typeof fs.mkdirSync !== "function") {
return null;
}
const timestamp = new Date().toISOString().replace(/[:.]/g, "").slice(0, 15);
const uniqueId = Math.random().toString(36).slice(2, 8);
const baseDir = logsDir || (typeof process !== "undefined" ? process.cwd() : ".");
const logDir = path.join(baseDir, "logs", `responses_${model}_${timestamp}_${uniqueId}`);
try {
fs.mkdirSync(logDir, { recursive: true });
} catch {
return null;
}
let inputEvents = [];
let outputEvents = [];
return {
logInput: (event) => {
inputEvents.push(event);
},
logOutput: (event) => {
outputEvents.push(event);
},
flush: () => {
try {
fs.writeFileSync(path.join(logDir, "1_input_stream.txt"), inputEvents.join("\n"));
fs.writeFileSync(path.join(logDir, "2_output_stream.txt"), outputEvents.join("\n"));
} catch (e) {
console.log("[RESPONSES] Failed to write logs:", e.message);
}
}
};
}
/**
* Create TransformStream that converts Chat Completions SSE to Responses API SSE
* @param {Object} logger - Optional logger instance
* @returns {TransformStream}
*/
export function createResponsesApiTransformStream(logger = null) {
const state = {
seq: 0,
responseId: `resp_${Date.now()}`,
created: Math.floor(Date.now() / 1000),
started: false,
msgTextBuf: {},
msgItemAdded: {},
msgContentAdded: {},
msgItemDone: {},
reasoningId: "",
reasoningIndex: -1,
reasoningBuf: "",
reasoningPartAdded: false,
reasoningDone: false,
inThinking: false,
funcArgsBuf: {},
funcNames: {},
funcCallIds: {},
funcArgsDone: {},
funcItemDone: {},
buffer: "",
completedSent: false
};
const encoder = new TextEncoder();
const nextSeq = () => ++state.seq;
const emit = (controller, eventType, data) => {
data.sequence_number = nextSeq();
const output = `event: ${eventType}\ndata: ${JSON.stringify(data)}\n\n`;
logger?.logOutput(output.trim());
controller.enqueue(encoder.encode(output));
};
// Helper to start reasoning
const startReasoning = (controller, idx) => {
if (!state.reasoningId) {
state.reasoningId = `rs_${state.responseId}_${idx}`;
state.reasoningIndex = idx;
emit(controller, "response.output_item.added", {
type: "response.output_item.added",
output_index: idx,
item: {
id: state.reasoningId,
type: "reasoning",
summary: []
}
});
emit(controller, "response.reasoning_summary_part.added", {
type: "response.reasoning_summary_part.added",
item_id: state.reasoningId,
output_index: idx,
summary_index: 0,
part: { type: "summary_text", text: "" }
});
state.reasoningPartAdded = true;
}
};
const emitReasoningDelta = (controller, text) => {
if (!text) return;
state.reasoningBuf += text;
emit(controller, "response.reasoning_summary_text.delta", {
type: "response.reasoning_summary_text.delta",
item_id: state.reasoningId,
output_index: state.reasoningIndex,
summary_index: 0,
delta: text
});
};
const closeReasoning = (controller) => {
if (state.reasoningId && !state.reasoningDone) {
state.reasoningDone = true;
emit(controller, "response.reasoning_summary_text.done", {
type: "response.reasoning_summary_text.done",
item_id: state.reasoningId,
output_index: state.reasoningIndex,
summary_index: 0,
text: state.reasoningBuf
});
emit(controller, "response.reasoning_summary_part.done", {
type: "response.reasoning_summary_part.done",
item_id: state.reasoningId,
output_index: state.reasoningIndex,
summary_index: 0,
part: { type: "summary_text", text: state.reasoningBuf }
});
emit(controller, "response.output_item.done", {
type: "response.output_item.done",
output_index: state.reasoningIndex,
item: {
id: state.reasoningId,
type: "reasoning",
summary: [{ type: "summary_text", text: state.reasoningBuf }]
}
});
}
};
const closeMessage = (controller, idx) => {
if (state.msgItemAdded[idx] && !state.msgItemDone[idx]) {
state.msgItemDone[idx] = true;
const fullText = state.msgTextBuf[idx] || "";
const msgId = `msg_${state.responseId}_${idx}`;
emit(controller, "response.output_text.done", {
type: "response.output_text.done",
item_id: msgId,
output_index: parseInt(idx),
content_index: 0,
text: fullText,
logprobs: []
});
emit(controller, "response.content_part.done", {
type: "response.content_part.done",
item_id: msgId,
output_index: parseInt(idx),
content_index: 0,
part: { type: "output_text", annotations: [], logprobs: [], text: fullText }
});
emit(controller, "response.output_item.done", {
type: "response.output_item.done",
output_index: parseInt(idx),
item: {
id: msgId,
type: "message",
content: [{ type: "output_text", annotations: [], logprobs: [], text: fullText }],
role: "assistant"
}
});
}
};
const closeToolCall = (controller, idx) => {
const callId = state.funcCallIds[idx];
if (callId && !state.funcItemDone[idx]) {
const args = state.funcArgsBuf[idx] || "{}";
emit(controller, "response.function_call_arguments.done", {
type: "response.function_call_arguments.done",
item_id: `fc_${callId}`,
output_index: parseInt(idx),
arguments: args
});
emit(controller, "response.output_item.done", {
type: "response.output_item.done",
output_index: parseInt(idx),
item: {
id: `fc_${callId}`,
type: "function_call",
arguments: args,
call_id: callId,
name: state.funcNames[idx] || ""
}
});
state.funcItemDone[idx] = true;
state.funcArgsDone[idx] = true;
}
};
const sendCompleted = (controller) => {
if (!state.completedSent) {
state.completedSent = true;
emit(controller, "response.completed", {
type: "response.completed",
response: {
id: state.responseId,
object: "response",
created_at: state.created,
status: "completed",
background: false,
error: null
}
});
}
};
return new TransformStream({
transform(chunk, controller) {
const text = new TextDecoder().decode(chunk);
logger?.logInput(text.trim());
state.buffer += text;
const messages = state.buffer.split("\n\n");
state.buffer = messages.pop() || "";
for (const msg of messages) {
if (!msg.trim()) continue;
const dataMatch = msg.match(/^data:\s*(.+)$/m);
if (!dataMatch) continue;
const dataStr = dataMatch[1].trim();
if (dataStr === "[DONE]") continue;
let parsed;
try {
parsed = JSON.parse(dataStr);
} catch {
continue;
}
if (!parsed.choices?.length) continue;
const choice = parsed.choices[0];
const idx = choice.index || 0;
const delta = choice.delta || {};
// Emit initial events
if (!state.started) {
state.started = true;
state.responseId = parsed.id ? `resp_${parsed.id}` : state.responseId;
emit(controller, "response.created", {
type: "response.created",
response: {
id: state.responseId,
object: "response",
created_at: state.created,
status: "in_progress",
background: false,
error: null,
output: []
}
});
emit(controller, "response.in_progress", {
type: "response.in_progress",
response: {
id: state.responseId,
object: "response",
created_at: state.created,
status: "in_progress"
}
});
}
// Handle reasoning_content (OpenAI native format)
if (delta.reasoning_content) {
startReasoning(controller, idx);
emitReasoningDelta(controller, delta.reasoning_content);
}
// Handle text content (may contain <think> tags)
if (delta.content) {
let content = delta.content;
if (content.includes("<think>")) {
state.inThinking = true;
content = content.replace("<think>", "");
startReasoning(controller, idx);
}
if (content.includes("</think>")) {
const parts = content.split("</think>");
const thinkPart = parts[0];
const textPart = parts.slice(1).join("</think>");
if (thinkPart) emitReasoningDelta(controller, thinkPart);
closeReasoning(controller);
state.inThinking = false;
content = textPart;
}
if (state.inThinking && content) {
emitReasoningDelta(controller, content);
continue;
}
// Regular text content
if (content) {
if (!state.msgItemAdded[idx]) {
state.msgItemAdded[idx] = true;
const msgId = `msg_${state.responseId}_${idx}`;
emit(controller, "response.output_item.added", {
type: "response.output_item.added",
output_index: idx,
item: { id: msgId, type: "message", content: [], role: "assistant" }
});
}
if (!state.msgContentAdded[idx]) {
state.msgContentAdded[idx] = true;
emit(controller, "response.content_part.added", {
type: "response.content_part.added",
item_id: `msg_${state.responseId}_${idx}`,
output_index: idx,
content_index: 0,
part: { type: "output_text", annotations: [], logprobs: [], text: "" }
});
}
emit(controller, "response.output_text.delta", {
type: "response.output_text.delta",
item_id: `msg_${state.responseId}_${idx}`,
output_index: idx,
content_index: 0,
delta: content,
logprobs: []
});
if (!state.msgTextBuf[idx]) state.msgTextBuf[idx] = "";
state.msgTextBuf[idx] += content;
}
}
// Handle tool_calls
if (delta.tool_calls) {
closeMessage(controller, idx);
for (const tc of delta.tool_calls) {
const tcIdx = tc.index ?? 0;
const newCallId = tc.id;
const funcName = tc.function?.name;
if (funcName) state.funcNames[tcIdx] = funcName;
if (!state.funcCallIds[tcIdx] && newCallId) {
state.funcCallIds[tcIdx] = newCallId;
emit(controller, "response.output_item.added", {
type: "response.output_item.added",
output_index: tcIdx,
item: {
id: `fc_${newCallId}`,
type: "function_call",
arguments: "",
call_id: newCallId,
name: state.funcNames[tcIdx] || ""
}
});
}
if (!state.funcArgsBuf[tcIdx]) state.funcArgsBuf[tcIdx] = "";
if (tc.function?.arguments) {
const refCallId = state.funcCallIds[tcIdx] || newCallId;
if (refCallId) {
emit(controller, "response.function_call_arguments.delta", {
type: "response.function_call_arguments.delta",
item_id: `fc_${refCallId}`,
output_index: tcIdx,
delta: tc.function.arguments
});
}
state.funcArgsBuf[tcIdx] += tc.function.arguments;
}
}
}
// Handle finish_reason
if (choice.finish_reason) {
for (const i in state.msgItemAdded) closeMessage(controller, i);
closeReasoning(controller);
for (const i in state.funcCallIds) closeToolCall(controller, i);
sendCompleted(controller);
}
}
},
flush(controller) {
for (const i in state.msgItemAdded) closeMessage(controller, i);
closeReasoning(controller);
for (const i in state.funcCallIds) closeToolCall(controller, i);
sendCompleted(controller);
logger?.logOutput("data: [DONE]");
controller.enqueue(encoder.encode("data: [DONE]\n\n"));
logger?.flush();
}
});
}
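// Illustrative wiring sketch (not part of the original module): converting an upstream
// Chat Completions SSE response into Responses API SSE. `upstream` is assumed to be a fetch()
// Response whose body streams Chat Completions chunks; the model name is a placeholder and is
// only used to label the log directory.
//
//   const logger = createResponsesLogger("gpt-5"); // null in environments without fs
//   const responsesStream = upstream.body.pipeThrough(createResponsesApiTransformStream(logger));
//   return new Response(responsesStream, {
//     headers: { "Content-Type": "text/event-stream" }
//   });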

View File

@@ -0,0 +1,12 @@
// Format identifiers
export const FORMATS = {
OPENAI: "openai",
OPENAI_RESPONSES: "openai-responses",
OPENAI_RESPONSE: "openai-response",
CLAUDE: "claude",
GEMINI: "gemini",
GEMINI_CLI: "gemini-cli",
CODEX: "codex",
ANTIGRAVITY: "antigravity"
};

View File

@@ -0,0 +1,348 @@
import { register } from "../index.js";
import { FORMATS } from "../formats.js";
// Create OpenAI chunk helper
function createChunk(state, delta, finishReason = null) {
return {
id: `chatcmpl-${state.messageId}`,
object: "chat.completion.chunk",
created: Math.floor(Date.now() / 1000),
model: state.model,
choices: [{
index: 0,
delta,
finish_reason: finishReason
}]
};
}
// Convert Claude stream chunk to OpenAI format
function claudeToOpenAIResponse(chunk, state) {
if (!chunk) return null;
const results = [];
const event = chunk.type;
switch (event) {
case "message_start": {
state.messageId = chunk.message?.id || `msg_${Date.now()}`;
state.model = chunk.message?.model;
state.toolCallIndex = 0; // Reset tool call counter for OpenAI format
console.log("🔍 ----------- toolCallIndex", state.toolCallIndex);
results.push(createChunk(state, { role: "assistant" }));
break;
}
case "content_block_start": {
const block = chunk.content_block;
if (block?.type === "text") {
state.textBlockStarted = true;
} else if (block?.type === "thinking") {
// console.log("🧠 Thinking block started");
state.inThinkingBlock = true;
state.currentBlockIndex = chunk.index;
results.push(createChunk(state, { content: "<think>" }));
} else if (block?.type === "tool_use") {
// OpenAI format: tool_calls index must be independent and start from 0
const toolCallIndex = state.toolCallIndex++;
const toolCall = {
index: toolCallIndex,
id: block.id,
type: "function",
function: {
name: block.name,
arguments: ""
}
};
// Map Claude content_block index to OpenAI tool_call index
state.toolCalls.set(chunk.index, toolCall);
results.push(createChunk(state, { tool_calls: [toolCall] }));
}
break;
}
case "content_block_delta": {
const delta = chunk.delta;
if (delta?.type === "text_delta" && delta.text) {
results.push(createChunk(state, { content: delta.text }));
} else if (delta?.type === "thinking_delta" && delta.thinking) {
// Stream thinking content
results.push(createChunk(state, { content: delta.thinking }));
} else if (delta?.type === "input_json_delta" && delta.partial_json) {
const toolCall = state.toolCalls.get(chunk.index);
if (toolCall) {
toolCall.function.arguments += delta.partial_json;
// Include both index and id for better client compatibility
results.push(createChunk(state, {
tool_calls: [{
index: toolCall.index,
id: toolCall.id,
function: { arguments: delta.partial_json }
}]
}));
}
}
break;
}
case "content_block_stop": {
if (state.inThinkingBlock && chunk.index === state.currentBlockIndex) {
// console.log("✅ Thinking block ended");
results.push(createChunk(state, { content: "</think>" }));
state.inThinkingBlock = false;
}
state.textBlockStarted = false;
state.thinkingBlockStarted = false;
break;
}
case "message_delta": {
if (chunk.delta?.stop_reason) {
state.finishReason = convertStopReason(chunk.delta.stop_reason);
// Send the final chunk with finish_reason immediately
results.push({
id: `chatcmpl-${state.messageId}`,
object: "chat.completion.chunk",
created: Math.floor(Date.now() / 1000),
model: state.model,
choices: [{
index: 0,
delta: {},
finish_reason: state.finishReason
}]
});
state.finishReasonSent = true;
}
// Usage is now extracted in stream.js extractUsage()
break;
}
case "message_stop": {
// Per the OpenAI streaming convention (and CLIProxyAPI), message_stop should emit the final
// chunk carrying finish_reason so the client knows the response is complete.
// Only send it here if message_delta has not already emitted finish_reason.
if (!state.finishReasonSent) {
const finishReason = state.finishReason || (state.toolCalls?.size > 0 ? "tool_calls" : "stop");
results.push({
id: `chatcmpl-${state.messageId}`,
object: "chat.completion.chunk",
created: Math.floor(Date.now() / 1000),
model: state.model,
choices: [{
index: 0,
delta: {},
finish_reason: finishReason
}],
...(state.usage && {
usage: {
prompt_tokens: state.usage.input_tokens || 0,
completion_tokens: state.usage.output_tokens || 0,
total_tokens: (state.usage.input_tokens || 0) + (state.usage.output_tokens || 0)
}
})
});
state.finishReasonSent = true;
}
break;
}
}
return results.length > 0 ? results : null;
}
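// Illustrative sketch (not part of the original module): feeding parsed Claude SSE events
// through the translator. The caller owns the state object and must initialize toolCalls as a
// Map; the event payloads and model name below are placeholders.
//
//   const state = { toolCalls: new Map() };
//   claudeToOpenAIResponse({ type: "message_start", message: { id: "msg_01", model: "claude-sonnet-4" } }, state);
//   const chunks = claudeToOpenAIResponse(
//     { type: "content_block_delta", index: 0, delta: { type: "text_delta", text: "Hello" } },
//     state
//   );
//   // chunks -> [{ id: "chatcmpl-msg_01", object: "chat.completion.chunk",
//   //              choices: [{ index: 0, delta: { content: "Hello" }, finish_reason: null }], ... }]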
// Helper: stop thinking block if started
function stopThinkingBlock(state, results) {
if (!state.thinkingBlockStarted) return;
results.push({
type: "content_block_stop",
index: state.thinkingBlockIndex
});
state.thinkingBlockStarted = false;
}
// Helper: stop text block if started
function stopTextBlock(state, results) {
if (!state.textBlockStarted || state.textBlockClosed) return;
state.textBlockClosed = true;
results.push({
type: "content_block_stop",
index: state.textBlockIndex
});
state.textBlockStarted = false;
}
// Convert OpenAI stream chunk to Claude format
function openaiToClaudeResponse(chunk, state) {
if (!chunk || !chunk.choices?.[0]) return null;
const results = [];
const choice = chunk.choices[0];
const delta = choice.delta;
// First chunk - ALWAYS send message_start first
if (!state.messageStartSent) {
state.messageStartSent = true;
state.messageId = chunk.id?.replace("chatcmpl-", "") || `msg_${Date.now()}`;
if (!state.messageId || state.messageId === "chat" || state.messageId.length < 8) {
state.messageId = chunk.extend_fields?.requestId ||
chunk.extend_fields?.traceId ||
`msg_${Date.now()}`;
}
state.model = chunk.model || "unknown";
state.nextBlockIndex = 0;
results.push({
type: "message_start",
message: {
id: state.messageId,
type: "message",
role: "assistant",
model: state.model,
content: [],
stop_reason: null,
stop_sequence: null,
usage: { input_tokens: 0, output_tokens: 0 }
}
});
}
// Handle reasoning_content (thinking) - GLM, DeepSeek, etc.
const reasoningContent = delta?.reasoning_content || delta?.reasoning;
if (reasoningContent) {
// Stop text block before thinking
stopTextBlock(state, results);
// Start thinking block if needed
if (!state.thinkingBlockStarted) {
state.thinkingBlockIndex = state.nextBlockIndex++;
state.thinkingBlockStarted = true;
results.push({
type: "content_block_start",
index: state.thinkingBlockIndex,
content_block: { type: "thinking", thinking: "" }
});
}
// Send thinking delta
results.push({
type: "content_block_delta",
index: state.thinkingBlockIndex,
delta: { type: "thinking_delta", thinking: reasoningContent }
});
}
// Handle regular content
if (delta?.content) {
// Stop thinking block before text
stopThinkingBlock(state, results);
// Start text block if needed
if (!state.textBlockStarted) {
state.textBlockIndex = state.nextBlockIndex++;
state.textBlockStarted = true;
state.textBlockClosed = false;
results.push({
type: "content_block_start",
index: state.textBlockIndex,
content_block: { type: "text", text: "" }
});
}
// Send text delta
results.push({
type: "content_block_delta",
index: state.textBlockIndex,
delta: { type: "text_delta", text: delta.content }
});
}
// Tool calls
if (delta?.tool_calls) {
for (const tc of delta.tool_calls) {
const idx = tc.index ?? 0;
if (tc.id) {
// Stop thinking and text blocks before tool use
stopThinkingBlock(state, results);
stopTextBlock(state, results);
// New tool call
const toolBlockIndex = state.nextBlockIndex++;
state.toolCalls.set(idx, { id: tc.id, name: tc.function?.name || "", blockIndex: toolBlockIndex });
results.push({
type: "content_block_start",
index: toolBlockIndex,
content_block: {
type: "tool_use",
id: tc.id,
name: tc.function?.name || "",
input: {}
}
});
}
if (tc.function?.arguments) {
const toolInfo = state.toolCalls.get(idx);
if (toolInfo) {
results.push({
type: "content_block_delta",
index: toolInfo.blockIndex,
delta: { type: "input_json_delta", partial_json: tc.function.arguments }
});
}
}
}
}
// Finish
if (choice.finish_reason) {
// Stop all open blocks
stopThinkingBlock(state, results);
stopTextBlock(state, results);
// Close tool call blocks
for (const [, toolInfo] of state.toolCalls) {
results.push({
type: "content_block_stop",
index: toolInfo.blockIndex
});
}
results.push({
type: "message_delta",
delta: { stop_reason: convertFinishReason(choice.finish_reason) },
usage: { output_tokens: 0 }
});
results.push({ type: "message_stop" });
}
return results.length > 0 ? results : null;
}
// Convert Claude stop_reason to OpenAI finish_reason
function convertStopReason(reason) {
switch (reason) {
case "end_turn": return "stop";
case "max_tokens": return "length";
case "tool_use": return "tool_calls";
case "stop_sequence": return "stop";
default: return "stop";
}
}
// Convert OpenAI finish_reason to Claude stop_reason
function convertFinishReason(reason) {
switch (reason) {
case "stop": return "end_turn";
case "length": return "max_tokens";
case "tool_calls": return "tool_use";
default: return "end_turn";
}
}
// Register
register(FORMATS.CLAUDE, FORMATS.OPENAI, null, claudeToOpenAIResponse);
register(FORMATS.OPENAI, FORMATS.CLAUDE, null, openaiToClaudeResponse);

View File

@@ -0,0 +1,469 @@
import { register } from "../index.js";
import { FORMATS } from "../formats.js";
import { DEFAULT_THINKING_GEMINI_SIGNATURE } from "../../config/defaultThinkingSignature.js";
import {
UNSUPPORTED_SCHEMA_CONSTRAINTS,
DEFAULT_SAFETY_SETTINGS,
convertOpenAIContentToParts,
extractTextContent,
tryParseJSON,
generateRequestId,
generateSessionId,
generateProjectId,
cleanJSONSchemaForAntigravity
} from "../helpers/geminiHelper.js";
// ============================================
// REQUEST TRANSLATORS: OpenAI -> Gemini/GeminiCLI/Antigravity
// ============================================
// Core: Convert OpenAI request to Gemini format (base for all variants)
function openaiToGeminiBase(model, body, stream) {
const result = {
model: model,
contents: [],
generationConfig: {},
safetySettings: DEFAULT_SAFETY_SETTINGS
};
// Generation config
if (body.temperature !== undefined) {
result.generationConfig.temperature = body.temperature;
}
if (body.top_p !== undefined) {
result.generationConfig.topP = body.top_p;
}
if (body.top_k !== undefined) {
result.generationConfig.topK = body.top_k;
}
if (body.max_tokens !== undefined) {
result.generationConfig.maxOutputTokens = body.max_tokens;
}
// Build tool_call_id -> name map
const tcID2Name = {};
if (body.messages && Array.isArray(body.messages)) {
for (const msg of body.messages) {
if (msg.role === "assistant" && msg.tool_calls) {
for (const tc of msg.tool_calls) {
if (tc.type === "function" && tc.id && tc.function?.name) {
tcID2Name[tc.id] = tc.function.name;
}
}
}
}
}
// Build tool responses cache
const toolResponses = {};
if (body.messages && Array.isArray(body.messages)) {
for (const msg of body.messages) {
if (msg.role === "tool" && msg.tool_call_id) {
toolResponses[msg.tool_call_id] = msg.content;
}
}
}
// Convert messages
if (body.messages && Array.isArray(body.messages)) {
for (let i = 0; i < body.messages.length; i++) {
const msg = body.messages[i];
const role = msg.role;
const content = msg.content;
if (role === "system" && body.messages.length > 1) {
result.systemInstruction = {
role: "user",
parts: [{ text: typeof content === "string" ? content : extractTextContent(content) }]
};
} else if (role === "user" || (role === "system" && body.messages.length === 1)) {
const parts = convertOpenAIContentToParts(content);
if (parts.length > 0) {
result.contents.push({ role: "user", parts });
}
} else if (role === "assistant") {
const parts = [];
if (content) {
const text = typeof content === "string" ? content : extractTextContent(content);
if (text) {
parts.push({ text });
}
}
if (msg.tool_calls && Array.isArray(msg.tool_calls)) {
const toolCallIds = [];
for (const tc of msg.tool_calls) {
if (tc.type !== "function") continue;
const args = tryParseJSON(tc.function?.arguments || "{}");
parts.push({
thoughtSignature: DEFAULT_THINKING_GEMINI_SIGNATURE,
functionCall: {
id: tc.id,
name: tc.function.name,
args: args
}
});
toolCallIds.push(tc.id);
}
if (parts.length > 0) {
result.contents.push({ role: "model", parts });
}
// Append function responses - extract name from tool_call_id format "ToolName-timestamp-index"
const toolParts = [];
for (const fid of toolCallIds) {
// Try to get name from tcID2Name map first, then extract from id format
let name = tcID2Name[fid];
if (!name) {
// Extract name from id format: "ToolName-timestamp-index"
const idParts = fid.split("-");
if (idParts.length > 2) {
name = idParts.slice(0, -2).join("-");
} else {
name = fid;
}
}
let resp = toolResponses[fid] || "{}";
let parsedResp = tryParseJSON(resp);
if (parsedResp === null) {
parsedResp = { result: resp };
} else if (typeof parsedResp !== "object") {
parsedResp = { result: parsedResp };
}
toolParts.push({
functionResponse: {
id: fid,
name: name,
response: { result: parsedResp }
}
});
}
if (toolParts.length > 0) {
result.contents.push({ role: "user", parts: toolParts });
}
} else if (parts.length > 0) {
result.contents.push({ role: "model", parts });
}
}
}
}
// Convert tools
if (body.tools && Array.isArray(body.tools) && body.tools.length > 0) {
const functionDeclarations = [];
for (const t of body.tools) {
if (t.type === "function" && t.function) {
const fn = t.function;
functionDeclarations.push({
name: fn.name,
description: fn.description || "",
parameters: fn.parameters || { type: "object", properties: {} }
});
}
}
if (functionDeclarations.length > 0) {
result.tools = [{ functionDeclarations }];
}
}
return result;
}
// OpenAI -> Gemini (standard API)
function openaiToGemini(model, body, stream) {
return openaiToGeminiBase(model, body, stream);
}
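// Illustrative sketch (not part of the original module): a minimal Chat Completions body
// converted to Gemini format. The model name is a placeholder.
//
//   openaiToGemini("gemini-2.5-pro", {
//     temperature: 0.7,
//     max_tokens: 1024,
//     messages: [
//       { role: "system", content: "Be brief." },
//       { role: "user", content: "Hi" }
//     ]
//   }, true);
//   // -> {
//   //   model: "gemini-2.5-pro",
//   //   systemInstruction: { role: "user", parts: [{ text: "Be brief." }] },
//   //   contents: [{ role: "user", parts: [{ text: "Hi" }] }],
//   //   generationConfig: { temperature: 0.7, maxOutputTokens: 1024 },
//   //   safetySettings: DEFAULT_SAFETY_SETTINGS
//   // }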
// OpenAI -> Gemini CLI (Cloud Code Assist)
function openaiToGeminiCLI(model, body, stream) {
const gemini = openaiToGeminiBase(model, body, stream);
const isClaude = model.toLowerCase().includes("claude");
// Add thinking config for CLI
if (body.reasoning_effort) {
const budgetMap = { low: 1024, medium: 8192, high: 32768 };
const budget = budgetMap[body.reasoning_effort] || 8192;
gemini.generationConfig.thinkingConfig = {
thinkingBudget: budget,
include_thoughts: true
};
}
// Thinking config from Claude format
if (body.thinking?.type === "enabled" && body.thinking.budget_tokens) {
gemini.generationConfig.thinkingConfig = {
thinkingBudget: body.thinking.budget_tokens,
include_thoughts: true
};
}
// Clean schema for tools
// Claude models: use "parameters" (backend converts parametersJsonSchema -> parameters)
// Gemini native: use "parametersJsonSchema" (backend expects this field)
if (gemini.tools?.[0]?.functionDeclarations) {
for (const fn of gemini.tools[0].functionDeclarations) {
if (fn.parameters) {
const cleanedSchema = cleanJSONSchemaForAntigravity(fn.parameters);
if (isClaude) {
fn.parameters = cleanedSchema;
} else {
fn.parametersJsonSchema = cleanedSchema;
delete fn.parameters;
}
}
}
}
return gemini;
}
// Wrap Gemini CLI format in Cloud Code wrapper
function wrapInCloudCodeEnvelope(model, geminiCLI, credentials = null) {
// Use real project ID if available, otherwise generate random
const projectId = credentials?.projectId || generateProjectId();
return {
project: projectId,
model: model,
userAgent: "gemini-cli",
requestId: generateRequestId(),
request: {
sessionId: generateSessionId(),
contents: geminiCLI.contents,
systemInstruction: geminiCLI.systemInstruction,
generationConfig: geminiCLI.generationConfig,
safetySettings: geminiCLI.safetySettings,
tools: geminiCLI.tools,
}
};
}
// OpenAI -> Antigravity (Sandbox Cloud Code with wrapper)
function openaiToAntigravity(model, body, stream, credentials = null) {
const geminiCLI = openaiToGeminiCLI(model, body, stream);
return wrapInCloudCodeEnvelope(model, geminiCLI, credentials);
}
// ============================================
// RESPONSE TRANSLATORS: Gemini/GeminiCLI/Antigravity -> OpenAI
// ============================================
// Core: Convert Gemini response chunk to OpenAI format
function geminiToOpenAIResponse(chunk, state) {
if (!chunk) return null;
// Handle Antigravity wrapper
const response = chunk.response || chunk;
if (!response || !response.candidates?.[0]) return null;
const results = [];
const candidate = response.candidates[0];
const content = candidate.content;
// Initialize state
if (!state.messageId) {
state.messageId = response.responseId || `msg_${Date.now()}`;
state.model = response.modelVersion || "gemini";
state.functionIndex = 0;
results.push({
id: `chatcmpl-${state.messageId}`,
object: "chat.completion.chunk",
created: Math.floor(Date.now() / 1000),
model: state.model,
choices: [{
index: 0,
delta: { role: "assistant" },
finish_reason: null
}]
});
}
// Process parts
if (content?.parts) {
for (const part of content.parts) {
const hasThoughtSig = part.thoughtSignature || part.thought_signature;
const isThought = part.thought === true;
// Handle thought signature (thinking mode)
if (hasThoughtSig) {
const hasTextContent = part.text !== undefined && part.text !== "";
const hasFunctionCall = !!part.functionCall;
// If there's text with thoughtSignature
if (hasTextContent) {
results.push({
id: `chatcmpl-${state.messageId}`,
object: "chat.completion.chunk",
created: Math.floor(Date.now() / 1000),
model: state.model,
choices: [{
index: 0,
delta: isThought
? { reasoning_content: part.text }
: { content: part.text },
finish_reason: null
}]
});
}
// Process functionCall if exists, then skip to next part
if (hasFunctionCall) {
const fcName = part.functionCall.name;
const fcArgs = part.functionCall.args || {};
const toolCallIndex = state.functionIndex++;
const toolCall = {
id: `${fcName}-${Date.now()}-${toolCallIndex}`,
index: toolCallIndex,
type: "function",
function: {
name: fcName,
arguments: JSON.stringify(fcArgs)
}
};
state.toolCalls.set(toolCallIndex, toolCall);
results.push({
id: `chatcmpl-${state.messageId}`,
object: "chat.completion.chunk",
created: Math.floor(Date.now() / 1000),
model: state.model,
choices: [{
index: 0,
delta: { tool_calls: [toolCall] },
finish_reason: null
}]
});
}
continue;
}
// Text content (non-thinking) - skip empty text
if (part.text !== undefined && part.text !== "") {
results.push({
id: `chatcmpl-${state.messageId}`,
object: "chat.completion.chunk",
created: Math.floor(Date.now() / 1000),
model: state.model,
choices: [{
index: 0,
delta: { content: part.text },
finish_reason: null
}]
});
}
// Function call
if (part.functionCall) {
const fcName = part.functionCall.name;
const fcArgs = part.functionCall.args || {};
const toolCallIndex = state.functionIndex++;
const toolCall = {
id: `${fcName}-${Date.now()}-${toolCallIndex}`,
index: toolCallIndex,
type: "function",
function: {
name: fcName,
arguments: JSON.stringify(fcArgs)
}
};
state.toolCalls.set(toolCallIndex, toolCall);
results.push({
id: `chatcmpl-${state.messageId}`,
object: "chat.completion.chunk",
created: Math.floor(Date.now() / 1000),
model: state.model,
choices: [{
index: 0,
delta: { tool_calls: [toolCall] },
finish_reason: null
}]
});
}
// Inline data (images)
const inlineData = part.inlineData || part.inline_data;
if (inlineData?.data) {
const mimeType = inlineData.mimeType || inlineData.mime_type || "image/png";
results.push({
id: `chatcmpl-${state.messageId}`,
object: "chat.completion.chunk",
created: Math.floor(Date.now() / 1000),
model: state.model,
choices: [{
index: 0,
delta: {
images: [{
type: "image_url",
image_url: { url: `data:${mimeType};base64,${inlineData.data}` }
}]
},
finish_reason: null
}]
});
}
}
}
// Finish reason
if (candidate.finishReason) {
let finishReason = candidate.finishReason.toLowerCase();
if (finishReason === "stop" && state.toolCalls.size > 0) {
finishReason = "tool_calls";
}
results.push({
id: `chatcmpl-${state.messageId}`,
object: "chat.completion.chunk",
created: Math.floor(Date.now() / 1000),
model: state.model,
choices: [{
index: 0,
delta: {},
finish_reason: finishReason
}]
});
state.finishReason = finishReason;
}
// Usage metadata
const usage = response.usageMetadata || chunk.usageMetadata;
if (usage) {
const promptTokens = (usage.promptTokenCount || 0) + (usage.thoughtsTokenCount || 0);
state.usage = {
prompt_tokens: promptTokens,
completion_tokens: usage.candidatesTokenCount || 0,
total_tokens: usage.totalTokenCount || 0
};
if (usage.thoughtsTokenCount > 0) {
state.usage.completion_tokens_details = {
reasoning_tokens: usage.thoughtsTokenCount
};
}
}
return results.length > 0 ? results : null;
}
// ============================================
// REGISTER ALL TRANSLATORS
// ============================================
// Request: OpenAI -> Gemini variants
register(FORMATS.OPENAI, FORMATS.GEMINI, openaiToGemini, null);
register(FORMATS.OPENAI, FORMATS.GEMINI_CLI, (model, body, stream, credentials) => wrapInCloudCodeEnvelope(model, openaiToGeminiCLI(model, body, stream), credentials), null);
register(FORMATS.OPENAI, FORMATS.ANTIGRAVITY, openaiToAntigravity, null);
// Response: Gemini variants -> OpenAI (all use same handler)
register(FORMATS.GEMINI, FORMATS.OPENAI, null, geminiToOpenAIResponse);
register(FORMATS.GEMINI_CLI, FORMATS.OPENAI, null, geminiToOpenAIResponse);
register(FORMATS.ANTIGRAVITY, FORMATS.OPENAI, null, geminiToOpenAIResponse);

View File

@@ -0,0 +1,361 @@
/**
* Translator: OpenAI Chat Completions → OpenAI Responses API (response)
* Converts streaming chunks from Chat Completions to Responses API events
*/
import { register } from "../index.js";
import { FORMATS } from "../formats.js";
/**
* Translate OpenAI chunk to Responses API events
* @returns {Array} Array of events with { event, data } structure
*/
function translateResponse(chunk, state) {
if (!chunk) {
// Flush remaining events
return flushEvents(state);
}
if (!chunk.choices?.length) return [];
const events = [];
const nextSeq = () => ++state.seq;
const emit = (eventType, data) => {
data.sequence_number = nextSeq();
events.push({ event: eventType, data });
};
const choice = chunk.choices[0];
const idx = choice.index || 0;
const delta = choice.delta || {};
// Emit initial events
if (!state.started) {
state.started = true;
state.responseId = chunk.id ? `resp_${chunk.id}` : state.responseId;
emit("response.created", {
type: "response.created",
response: {
id: state.responseId,
object: "response",
created_at: state.created,
status: "in_progress",
background: false,
error: null,
output: []
}
});
emit("response.in_progress", {
type: "response.in_progress",
response: {
id: state.responseId,
object: "response",
created_at: state.created,
status: "in_progress"
}
});
}
// Handle reasoning_content
if (delta.reasoning_content) {
startReasoning(state, emit, idx);
emitReasoningDelta(state, emit, delta.reasoning_content);
}
// Handle text content
if (delta.content) {
let content = delta.content;
if (content.includes("<think>")) {
state.inThinking = true;
content = content.replace("<think>", "");
startReasoning(state, emit, idx);
}
if (content.includes("</think>")) {
const parts = content.split("</think>");
const thinkPart = parts[0];
const textPart = parts.slice(1).join("</think>");
if (thinkPart) emitReasoningDelta(state, emit, thinkPart);
closeReasoning(state, emit);
state.inThinking = false;
content = textPart;
}
if (state.inThinking && content) {
emitReasoningDelta(state, emit, content);
return events;
}
if (content) {
emitTextContent(state, emit, idx, content);
}
}
// Handle tool_calls
if (delta.tool_calls) {
closeMessage(state, emit, idx);
for (const tc of delta.tool_calls) {
emitToolCall(state, emit, tc);
}
}
// Handle finish_reason
if (choice.finish_reason) {
for (const i in state.msgItemAdded) closeMessage(state, emit, i);
closeReasoning(state, emit);
for (const i in state.funcCallIds) closeToolCall(state, emit, i);
sendCompleted(state, emit);
}
return events;
}
// Helper functions
function startReasoning(state, emit, idx) {
if (!state.reasoningId) {
state.reasoningId = `rs_${state.responseId}_${idx}`;
state.reasoningIndex = idx;
emit("response.output_item.added", {
type: "response.output_item.added",
output_index: idx,
item: { id: state.reasoningId, type: "reasoning", summary: [] }
});
emit("response.reasoning_summary_part.added", {
type: "response.reasoning_summary_part.added",
item_id: state.reasoningId,
output_index: idx,
summary_index: 0,
part: { type: "summary_text", text: "" }
});
state.reasoningPartAdded = true;
}
}
function emitReasoningDelta(state, emit, text) {
if (!text) return;
state.reasoningBuf += text;
emit("response.reasoning_summary_text.delta", {
type: "response.reasoning_summary_text.delta",
item_id: state.reasoningId,
output_index: state.reasoningIndex,
summary_index: 0,
delta: text
});
}
function closeReasoning(state, emit) {
if (state.reasoningId && !state.reasoningDone) {
state.reasoningDone = true;
emit("response.reasoning_summary_text.done", {
type: "response.reasoning_summary_text.done",
item_id: state.reasoningId,
output_index: state.reasoningIndex,
summary_index: 0,
text: state.reasoningBuf
});
emit("response.reasoning_summary_part.done", {
type: "response.reasoning_summary_part.done",
item_id: state.reasoningId,
output_index: state.reasoningIndex,
summary_index: 0,
part: { type: "summary_text", text: state.reasoningBuf }
});
emit("response.output_item.done", {
type: "response.output_item.done",
output_index: state.reasoningIndex,
item: {
id: state.reasoningId,
type: "reasoning",
summary: [{ type: "summary_text", text: state.reasoningBuf }]
}
});
}
}
function emitTextContent(state, emit, idx, content) {
if (!state.msgItemAdded[idx]) {
state.msgItemAdded[idx] = true;
const msgId = `msg_${state.responseId}_${idx}`;
emit("response.output_item.added", {
type: "response.output_item.added",
output_index: idx,
item: { id: msgId, type: "message", content: [], role: "assistant" }
});
}
if (!state.msgContentAdded[idx]) {
state.msgContentAdded[idx] = true;
emit("response.content_part.added", {
type: "response.content_part.added",
item_id: `msg_${state.responseId}_${idx}`,
output_index: idx,
content_index: 0,
part: { type: "output_text", annotations: [], logprobs: [], text: "" }
});
}
emit("response.output_text.delta", {
type: "response.output_text.delta",
item_id: `msg_${state.responseId}_${idx}`,
output_index: idx,
content_index: 0,
delta: content,
logprobs: []
});
if (!state.msgTextBuf[idx]) state.msgTextBuf[idx] = "";
state.msgTextBuf[idx] += content;
}
function closeMessage(state, emit, idx) {
if (state.msgItemAdded[idx] && !state.msgItemDone[idx]) {
state.msgItemDone[idx] = true;
const fullText = state.msgTextBuf[idx] || "";
const msgId = `msg_${state.responseId}_${idx}`;
emit("response.output_text.done", {
type: "response.output_text.done",
item_id: msgId,
output_index: parseInt(idx),
content_index: 0,
text: fullText,
logprobs: []
});
emit("response.content_part.done", {
type: "response.content_part.done",
item_id: msgId,
output_index: parseInt(idx),
content_index: 0,
part: { type: "output_text", annotations: [], logprobs: [], text: fullText }
});
emit("response.output_item.done", {
type: "response.output_item.done",
output_index: parseInt(idx),
item: {
id: msgId,
type: "message",
content: [{ type: "output_text", annotations: [], logprobs: [], text: fullText }],
role: "assistant"
}
});
}
}
function emitToolCall(state, emit, tc) {
const tcIdx = tc.index ?? 0;
const newCallId = tc.id;
const funcName = tc.function?.name;
if (funcName) state.funcNames[tcIdx] = funcName;
if (!state.funcCallIds[tcIdx] && newCallId) {
state.funcCallIds[tcIdx] = newCallId;
emit("response.output_item.added", {
type: "response.output_item.added",
output_index: tcIdx,
item: {
id: `fc_${newCallId}`,
type: "function_call",
arguments: "",
call_id: newCallId,
name: state.funcNames[tcIdx] || ""
}
});
}
if (!state.funcArgsBuf[tcIdx]) state.funcArgsBuf[tcIdx] = "";
if (tc.function?.arguments) {
const refCallId = state.funcCallIds[tcIdx] || newCallId;
if (refCallId) {
emit("response.function_call_arguments.delta", {
type: "response.function_call_arguments.delta",
item_id: `fc_${refCallId}`,
output_index: tcIdx,
delta: tc.function.arguments
});
}
state.funcArgsBuf[tcIdx] += tc.function.arguments;
}
}
function closeToolCall(state, emit, idx) {
const callId = state.funcCallIds[idx];
if (callId && !state.funcItemDone[idx]) {
const args = state.funcArgsBuf[idx] || "{}";
emit("response.function_call_arguments.done", {
type: "response.function_call_arguments.done",
item_id: `fc_${callId}`,
output_index: parseInt(idx),
arguments: args
});
emit("response.output_item.done", {
type: "response.output_item.done",
output_index: parseInt(idx),
item: {
id: `fc_${callId}`,
type: "function_call",
arguments: args,
call_id: callId,
name: state.funcNames[idx] || ""
}
});
state.funcItemDone[idx] = true;
state.funcArgsDone[idx] = true;
}
}
function sendCompleted(state, emit) {
if (!state.completedSent) {
state.completedSent = true;
emit("response.completed", {
type: "response.completed",
response: {
id: state.responseId,
object: "response",
created_at: state.created,
status: "completed",
background: false,
error: null
}
});
}
}
function flushEvents(state) {
if (state.completedSent) return [];
const events = [];
const nextSeq = () => ++state.seq;
const emit = (eventType, data) => {
data.sequence_number = nextSeq();
events.push({ event: eventType, data });
};
for (const i in state.msgItemAdded) closeMessage(state, emit, i);
closeReasoning(state, emit);
for (const i in state.funcCallIds) closeToolCall(state, emit, i);
sendCompleted(state, emit);
return events;
}
// Register translator
register(FORMATS.OPENAI, FORMATS.OPENAI_RESPONSES, null, translateResponse);

View File

@@ -0,0 +1,179 @@
// Claude helper functions for translator
import { DEFAULT_THINKING_CLAUDE_SIGNATURE } from "../../config/defaultThinkingSignature.js";
// Check if message has valid non-empty content
export function hasValidContent(msg) {
if (typeof msg.content === "string" && msg.content.trim()) return true;
if (Array.isArray(msg.content)) {
return msg.content.some(block =>
(block.type === "text" && block.text?.trim()) ||
block.type === "tool_use" ||
block.type === "tool_result"
);
}
return false;
}
// Fix tool_use/tool_result ordering for Claude API
// 1. Assistant message with tool_use: remove text AFTER tool_use (Claude doesn't allow)
// 2. Merge consecutive same-role messages
export function fixToolUseOrdering(messages) {
if (messages.length <= 1) return messages;
// Pass 1: Fix assistant messages with tool_use - remove text after tool_use
for (const msg of messages) {
if (msg.role === "assistant" && Array.isArray(msg.content)) {
const hasToolUse = msg.content.some(b => b.type === "tool_use");
if (hasToolUse) {
// Keep only: thinking blocks + tool_use blocks (remove text blocks after tool_use)
const newContent = [];
let foundToolUse = false;
for (const block of msg.content) {
if (block.type === "tool_use") {
foundToolUse = true;
newContent.push(block);
} else if (block.type === "thinking" || block.type === "redacted_thinking") {
newContent.push(block);
} else if (!foundToolUse) {
// Keep text blocks BEFORE tool_use
newContent.push(block);
}
// Skip text blocks AFTER tool_use
}
msg.content = newContent;
}
}
}
// Pass 2: Merge consecutive same-role messages
const merged = [];
for (const msg of messages) {
const last = merged[merged.length - 1];
if (last && last.role === msg.role) {
// Merge content arrays
const lastContent = Array.isArray(last.content) ? last.content : [{ type: "text", text: last.content }];
const msgContent = Array.isArray(msg.content) ? msg.content : [{ type: "text", text: msg.content }];
// Put tool_result first, then other content
const toolResults = [...lastContent.filter(b => b.type === "tool_result"), ...msgContent.filter(b => b.type === "tool_result")];
const otherContent = [...lastContent.filter(b => b.type !== "tool_result"), ...msgContent.filter(b => b.type !== "tool_result")];
last.content = [...toolResults, ...otherContent];
} else {
// Ensure content is array
const content = Array.isArray(msg.content) ? msg.content : [{ type: "text", text: msg.content }];
merged.push({ role: msg.role, content: [...content] });
}
}
return merged;
}
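// Illustrative sketch (not part of the original module): what fixToolUseOrdering does to a
// short history. Text after tool_use is dropped, consecutive same-role messages merge, and
// tool_result blocks are moved to the front of the merged user turn. The ids and texts are placeholders.
//
//   fixToolUseOrdering([
//     { role: "assistant", content: [
//       { type: "tool_use", id: "toolu_1", name: "get_time", input: {} },
//       { type: "text", text: "Calling the tool..." }          // removed (after tool_use)
//     ]},
//     { role: "user", content: [{ type: "text", text: "thanks" }] },
//     { role: "user", content: [{ type: "tool_result", tool_use_id: "toolu_1", content: "12:00" }] }
//   ]);
//   // -> [
//   //   { role: "assistant", content: [{ type: "tool_use", ... }] },
//   //   { role: "user", content: [{ type: "tool_result", ... }, { type: "text", text: "thanks" }] }
//   // ]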
// Prepare request for Claude format endpoints
// - Cleanup cache_control
// - Filter empty messages
// - Add thinking block for Anthropic endpoint (provider === "claude")
// - Fix tool_use/tool_result ordering
export function prepareClaudeRequest(body, provider = null) {
// 1. System: remove all cache_control, add only to last block with ttl 1h
if (body.system && Array.isArray(body.system)) {
body.system = body.system.map((block, i) => {
const { cache_control, ...rest } = block;
if (i === body.system.length - 1) {
return { ...rest, cache_control: { type: "ephemeral", ttl: "1h" } };
}
return rest;
});
}
// 2. Messages: process in optimized passes
if (body.messages && Array.isArray(body.messages)) {
const len = body.messages.length;
let filtered = [];
// Pass 1: remove cache_control + filter empty messages
for (let i = 0; i < len; i++) {
const msg = body.messages[i];
// Remove cache_control from content blocks
if (Array.isArray(msg.content)) {
for (const block of msg.content) {
delete block.cache_control;
}
}
// Keep final assistant even if empty, otherwise check valid content
const isFinalAssistant = i === len - 1 && msg.role === "assistant";
if (isFinalAssistant || hasValidContent(msg)) {
filtered.push(msg);
}
}
// Pass 1.5: Fix tool_use/tool_result ordering
// Each tool_use must have tool_result in the NEXT message (not same message with other content)
filtered = fixToolUseOrdering(filtered);
body.messages = filtered;
// Check if thinking is enabled AND last message is from user
const lastMessage = filtered[filtered.length - 1];
const lastMessageIsUser = lastMessage?.role === "user";
const thinkingEnabled = body.thinking?.type === "enabled" && lastMessageIsUser;
// Pass 2 (reverse): add cache_control to last assistant + handle thinking for Anthropic
let lastAssistantProcessed = false;
for (let i = filtered.length - 1; i >= 0; i--) {
const msg = filtered[i];
if (msg.role === "assistant" && Array.isArray(msg.content)) {
// Add cache_control to last block of first (from end) assistant with content
if (!lastAssistantProcessed && msg.content.length > 0) {
msg.content[msg.content.length - 1].cache_control = { type: "ephemeral" };
lastAssistantProcessed = true;
}
// Handle thinking blocks for Anthropic endpoint only
if (provider === "claude") {
let hasToolUse = false;
let hasThinking = false;
// Always replace signature for all thinking blocks
for (const block of msg.content) {
if (block.type === "thinking" || block.type === "redacted_thinking") {
block.signature = DEFAULT_THINKING_CLAUDE_SIGNATURE;
hasThinking = true;
}
if (block.type === "tool_use") hasToolUse = true;
}
// Add thinking block if thinking enabled + has tool_use but no thinking
if (thinkingEnabled && !hasThinking && hasToolUse) {
msg.content.unshift({
type: "thinking",
thinking: ".",
signature: DEFAULT_THINKING_CLAUDE_SIGNATURE
});
}
}
}
}
}
// 3. Tools: remove all cache_control, add only to last tool with ttl 1h
if (body.tools && Array.isArray(body.tools)) {
body.tools = body.tools.map((tool, i) => {
const { cache_control, ...rest } = tool;
if (i === body.tools.length - 1) {
return { ...rest, cache_control: { type: "ephemeral", ttl: "1h" } };
}
return rest;
});
}
return body;
}
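// Illustrative usage sketch (not executed; the request body below is a hypothetical, minimal example).
// prepareClaudeRequest runs after translation when the target format is Claude; passing
// provider === "claude" additionally normalizes thinking-block signatures.
/*
const prepared = prepareClaudeRequest({
  system: [{ type: "text", text: "You are helpful", cache_control: { type: "ephemeral" } }],
  messages: [{ role: "user", content: "Hello" }],
  tools: [{ name: "read_file", input_schema: { type: "object", properties: {} } }]
}, "claude");
// => cache_control is stripped everywhere and re-added only to the last system block and
//    last tool (ttl 1h), and to the last non-empty assistant message if one exists.
*/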

View File

@@ -0,0 +1,131 @@
// Gemini helper functions for translator
// Unsupported JSON Schema constraints that should be removed for Antigravity
export const UNSUPPORTED_SCHEMA_CONSTRAINTS = [
"minLength", "maxLength", "exclusiveMinimum", "exclusiveMaximum",
"pattern", "minItems", "maxItems", "format",
"default", "examples", "$schema", "const"
];
// Default safety settings
export const DEFAULT_SAFETY_SETTINGS = [
{ category: "HARM_CATEGORY_HATE_SPEECH", threshold: "OFF" },
{ category: "HARM_CATEGORY_DANGEROUS_CONTENT", threshold: "OFF" },
{ category: "HARM_CATEGORY_SEXUALLY_EXPLICIT", threshold: "OFF" },
{ category: "HARM_CATEGORY_HARASSMENT", threshold: "OFF" },
{ category: "HARM_CATEGORY_CIVIC_INTEGRITY", threshold: "OFF" }
];
// Convert OpenAI content to Gemini parts
export function convertOpenAIContentToParts(content) {
const parts = [];
if (typeof content === "string") {
parts.push({ text: content });
} else if (Array.isArray(content)) {
for (const item of content) {
if (item.type === "text") {
parts.push({ text: item.text });
} else if (item.type === "image_url" && item.image_url?.url?.startsWith("data:")) {
const match = item.image_url.url.match(/^data:([^;]+);base64,(.+)$/);
if (match) {
parts.push({
inlineData: { mime_type: match[1], data: match[2] }
});
}
}
}
}
return parts;
}
// Extract text content from OpenAI content
export function extractTextContent(content) {
if (typeof content === "string") return content;
if (Array.isArray(content)) {
return content.filter(c => c.type === "text").map(c => c.text).join("");
}
return "";
}
// Try parse JSON safely
export function tryParseJSON(str) {
if (typeof str !== "string") return str;
try {
return JSON.parse(str);
} catch {
return null;
}
}
// Generate request ID
export function generateRequestId() {
return `agent-${crypto.randomUUID()}`;
}
// Generate session ID
export function generateSessionId() {
return `-${Math.floor(Math.random() * 9000000000000000000)}`;
}
// Generate project ID
export function generateProjectId() {
const adjectives = ["useful", "bright", "swift", "calm", "bold"];
const nouns = ["fuze", "wave", "spark", "flow", "core"];
const adj = adjectives[Math.floor(Math.random() * adjectives.length)];
const noun = nouns[Math.floor(Math.random() * nouns.length)];
return `${adj}-${noun}-${crypto.randomUUID().slice(0, 5)}`;
}
// Clean JSON Schema for Antigravity API compatibility - removes unsupported keywords recursively
export function cleanJSONSchemaForAntigravity(schema) {
if (!schema || typeof schema !== "object") return schema;
const cleaned = Array.isArray(schema) ? [] : {};
for (const [key, value] of Object.entries(schema)) {
if (UNSUPPORTED_SCHEMA_CONSTRAINTS.includes(key)) continue;
// Handle type array like ["string", "null"] - Gemini only supports single type
if (key === "type" && Array.isArray(value)) {
const nonNullType = value.find(t => t !== "null") || "string";
cleaned[key] = nonNullType;
continue;
}
if (value && typeof value === "object") {
cleaned[key] = cleanJSONSchemaForAntigravity(value);
} else {
cleaned[key] = value;
}
}
// Cleanup required fields - only keep fields that exist in properties
if (cleaned.required && Array.isArray(cleaned.required) && cleaned.properties) {
const validRequired = cleaned.required.filter(field =>
Object.prototype.hasOwnProperty.call(cleaned.properties, field)
);
if (validRequired.length === 0) {
delete cleaned.required;
} else {
cleaned.required = validRequired;
}
}
// Add placeholder for empty object schemas (Antigravity requirement)
if (cleaned.type === "object") {
if (!cleaned.properties || Object.keys(cleaned.properties).length === 0) {
cleaned.properties = {
reason: {
type: "string",
description: "Brief explanation of why you are calling this tool"
}
};
cleaned.required = ["reason"];
}
}
return cleaned;
}
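// Illustrative usage sketch (not executed; the input schema is a hypothetical example):
// how cleanJSONSchemaForAntigravity rewrites a tool parameter schema.
/*
const cleaned = cleanJSONSchemaForAntigravity({
  type: "object",
  properties: {
    name: { type: ["string", "null"], minLength: 1, format: "hostname" }
  },
  required: ["name", "missing_field"]
});
// => {
//   type: "object",
//   properties: { name: { type: "string" } },  // unsupported constraints removed, single type kept
//   required: ["name"]                          // fields not present in properties dropped
// }
*/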

View File

@@ -0,0 +1,22 @@
import { DEFAULT_MAX_TOKENS, DEFAULT_MIN_TOKENS } from "../../config/constants.js";
/**
* Adjust max_tokens based on request context
* @param {object} body - Request body
* @returns {number} Adjusted max_tokens
*/
export function adjustMaxTokens(body) {
let maxTokens = body.max_tokens || DEFAULT_MAX_TOKENS;
// Auto-increase for tool calling to prevent truncated arguments
// Tool calls with large content (like writing files) need more tokens
if (body.tools && Array.isArray(body.tools) && body.tools.length > 0) {
if (maxTokens < DEFAULT_MIN_TOKENS) {
console.log(`[AUTO-ADJUST] max_tokens: ${maxTokens}${DEFAULT_MIN_TOKENS} (tool calling detected)`);
maxTokens = DEFAULT_MIN_TOKENS;
}
}
return maxTokens;
}
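// Illustrative usage sketch (not executed): with tools present, a small max_tokens is raised
// to DEFAULT_MIN_TOKENS; without tools it passes through (or falls back to DEFAULT_MAX_TOKENS).
/*
adjustMaxTokens({ max_tokens: 512, tools: [{ name: "read_file" }] }); // => DEFAULT_MIN_TOKENS (when 512 is below it)
adjustMaxTokens({ max_tokens: 512 });                                 // => 512
adjustMaxTokens({});                                                  // => DEFAULT_MAX_TOKENS
*/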

View File

@@ -0,0 +1,80 @@
// OpenAI helper functions for translator
// Valid OpenAI content block types
export const VALID_OPENAI_CONTENT_TYPES = ["text", "image_url", "image"];
export const VALID_OPENAI_MESSAGE_TYPES = ["text", "image_url", "image", "tool_calls", "tool_result"];
// Filter messages to OpenAI standard format
// Remove: thinking, redacted_thinking, signature, and other non-OpenAI blocks
export function filterToOpenAIFormat(body) {
if (!body.messages || !Array.isArray(body.messages)) return body;
body.messages = body.messages.map(msg => {
// Keep tool messages as-is (OpenAI format)
if (msg.role === "tool") return msg;
// Keep assistant messages with tool_calls as-is
if (msg.role === "assistant" && msg.tool_calls) return msg;
// Handle string content
if (typeof msg.content === "string") return msg;
// Handle array content
if (Array.isArray(msg.content)) {
const filteredContent = [];
for (const block of msg.content) {
// Skip thinking blocks
if (block.type === "thinking" || block.type === "redacted_thinking") continue;
// Only keep valid OpenAI content types
if (VALID_OPENAI_CONTENT_TYPES.includes(block.type)) {
// Remove signature field if exists
const { signature, cache_control, ...cleanBlock } = block;
filteredContent.push(cleanBlock);
} else if (block.type === "tool_use") {
// Convert tool_use to tool_calls format (handled separately)
continue;
} else if (block.type === "tool_result") {
// Keep tool_result but clean it
const { signature, cache_control, ...cleanBlock } = block;
filteredContent.push(cleanBlock);
}
}
// If all content was filtered, add empty text
if (filteredContent.length === 0) {
filteredContent.push({ type: "text", text: "" });
}
return { ...msg, content: filteredContent };
}
return msg;
});
// Filter out messages with only empty text (but NEVER filter tool messages)
body.messages = body.messages.filter(msg => {
// Always keep tool messages
if (msg.role === "tool") return true;
// Always keep assistant messages with tool_calls
if (msg.role === "assistant" && msg.tool_calls) return true;
if (typeof msg.content === "string") return msg.content.trim() !== "";
if (Array.isArray(msg.content)) {
return msg.content.some(b =>
(b.type === "text" && b.text?.trim()) ||
b.type !== "text"
);
}
return true;
});
// Remove empty tools array (some providers like QWEN reject it)
if (body.tools && Array.isArray(body.tools) && body.tools.length === 0) {
delete body.tools;
}
return body;
}
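// Illustrative usage sketch (not executed; message shapes are hypothetical): thinking blocks and
// signature/cache_control fields are stripped, unknown block types are dropped, and messages
// reduced to empty text are filtered out (tool messages are always kept).
/*
const out = filterToOpenAIFormat({
  messages: [
    { role: "user", content: [
      { type: "thinking", thinking: "internal notes" },
      { type: "text", text: "Hi", cache_control: { type: "ephemeral" } }
    ]},
    { role: "tool", tool_call_id: "call_1", content: "result" }
  ]
});
// => the user content becomes [{ type: "text", text: "Hi" }]; the tool message is untouched.
*/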

View File

@@ -0,0 +1,103 @@
/**
* Convert OpenAI Responses API format to standard chat completions format
* Responses API uses: { input: [...], instructions: "..." }
* Chat API uses: { messages: [...] }
*/
export function convertResponsesApiFormat(body) {
if (!body.input) return body;
const result = { ...body };
result.messages = [];
// Convert instructions to system message
if (body.instructions) {
result.messages.push({ role: "system", content: body.instructions });
}
// Group items by conversation turn
let currentAssistantMsg = null;
let pendingToolResults = [];
for (const item of body.input) {
if (item.type === "message") {
// Flush any pending assistant message with tool calls
if (currentAssistantMsg) {
result.messages.push(currentAssistantMsg);
currentAssistantMsg = null;
}
// Flush pending tool results
if (pendingToolResults.length > 0) {
for (const tr of pendingToolResults) {
result.messages.push(tr);
}
pendingToolResults = [];
}
// Convert content: input_text → text, output_text → text
const content = Array.isArray(item.content)
? item.content.map(c => {
if (c.type === "input_text") return { type: "text", text: c.text };
if (c.type === "output_text") return { type: "text", text: c.text };
return c;
})
: item.content;
result.messages.push({ role: item.role, content });
}
else if (item.type === "function_call") {
// Start or append to assistant message with tool_calls
if (!currentAssistantMsg) {
currentAssistantMsg = {
role: "assistant",
content: null,
tool_calls: []
};
}
currentAssistantMsg.tool_calls.push({
id: item.call_id,
type: "function",
function: {
name: item.name,
arguments: item.arguments
}
});
}
else if (item.type === "function_call_output") {
// Flush assistant message first if exists
if (currentAssistantMsg) {
result.messages.push(currentAssistantMsg);
currentAssistantMsg = null;
}
// Add tool result
pendingToolResults.push({
role: "tool",
tool_call_id: item.call_id,
content: typeof item.output === "string" ? item.output : JSON.stringify(item.output)
});
}
else if (item.type === "reasoning") {
// Skip reasoning items - they are for display only
continue;
}
}
// Flush remaining
if (currentAssistantMsg) {
result.messages.push(currentAssistantMsg);
}
if (pendingToolResults.length > 0) {
for (const tr of pendingToolResults) {
result.messages.push(tr);
}
}
// Cleanup Responses API specific fields
delete result.input;
delete result.instructions;
delete result.include;
delete result.prompt_cache_key;
delete result.store;
delete result.reasoning;
return result;
}
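// Illustrative usage sketch (not executed; call_id / output values are hypothetical):
// a Responses API input turn becomes standard chat messages.
/*
const chat = convertResponsesApiFormat({
  instructions: "Be brief.",
  input: [
    { type: "message", role: "user", content: [{ type: "input_text", text: "List files" }] },
    { type: "function_call", call_id: "call_1", name: "ls", arguments: "{}" },
    { type: "function_call_output", call_id: "call_1", output: "a.txt" }
  ]
});
// => messages: system("Be brief."), user("List files" as a text block),
//    assistant with tool_calls [call_1 -> ls], then tool(call_1, "a.txt")
*/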

View File

@@ -0,0 +1,111 @@
// Tool call helper functions for translator
// Generate unique tool call ID
export function generateToolCallId() {
return `call_${Date.now().toString(36)}_${Math.random().toString(36).slice(2, 9)}`;
}
// Ensure every tool_call has an id field and that arguments is a JSON string (some providers require both)
export function ensureToolCallIds(body) {
if (!body.messages || !Array.isArray(body.messages)) return body;
for (const msg of body.messages) {
if (msg.role === "assistant" && msg.tool_calls && Array.isArray(msg.tool_calls)) {
for (const tc of msg.tool_calls) {
if (!tc.id) {
tc.id = generateToolCallId();
}
if (!tc.type) {
tc.type = "function";
}
// Ensure arguments is JSON string, not object
if (tc.function?.arguments && typeof tc.function.arguments !== "string") {
tc.function.arguments = JSON.stringify(tc.function.arguments);
}
}
}
}
return body;
}
// Get tool_call ids from assistant message (OpenAI format: tool_calls, Claude format: tool_use in content)
export function getToolCallIds(msg) {
if (msg.role !== "assistant") return [];
const ids = [];
// OpenAI format: tool_calls array
if (msg.tool_calls && Array.isArray(msg.tool_calls)) {
for (const tc of msg.tool_calls) {
if (tc.id) ids.push(tc.id);
}
}
// Claude format: tool_use blocks in content
if (Array.isArray(msg.content)) {
for (const block of msg.content) {
if (block.type === "tool_use" && block.id) {
ids.push(block.id);
}
}
}
return ids;
}
// Check if user message has tool_result for given ids (OpenAI format: role=tool, Claude format: tool_result in content)
export function hasToolResults(msg, toolCallIds) {
if (!msg || !toolCallIds.length) return false;
// OpenAI format: role = "tool" with tool_call_id
if (msg.role === "tool" && msg.tool_call_id) {
return toolCallIds.includes(msg.tool_call_id);
}
// Claude format: tool_result blocks in user message content
if (msg.role === "user" && Array.isArray(msg.content)) {
for (const block of msg.content) {
if (block.type === "tool_result" && toolCallIds.includes(block.tool_use_id)) {
return true;
}
}
}
return false;
}
// Fix missing tool responses - insert empty tool_result if assistant has tool_use but next message has no tool_result
export function fixMissingToolResponses(body) {
if (!body.messages || !Array.isArray(body.messages)) return body;
const newMessages = [];
for (let i = 0; i < body.messages.length; i++) {
const msg = body.messages[i];
const nextMsg = body.messages[i + 1];
newMessages.push(msg);
// Check if this is assistant with tool_calls/tool_use
const toolCallIds = getToolCallIds(msg);
if (toolCallIds.length === 0) continue;
// Check if next message has tool_result
if (nextMsg && !hasToolResults(nextMsg, toolCallIds)) {
// Insert tool responses for each tool_call
for (const id of toolCallIds) {
// OpenAI format: role = "tool"
newMessages.push({
role: "tool",
tool_call_id: id,
content: ""
});
}
}
}
body.messages = newMessages;
return body;
}
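// Illustrative usage sketch (not executed; IDs below are hypothetical): when an assistant
// tool_call has no answer in the next message, an empty tool result is inserted so strict
// providers accept the history.
/*
const fixed = fixMissingToolResponses({
  messages: [
    { role: "assistant", tool_calls: [{ id: "call_1", type: "function",
        function: { name: "ls", arguments: "{}" } }] },
    { role: "user", content: "Actually, never mind." }
  ]
});
// => an empty { role: "tool", tool_call_id: "call_1", content: "" } message is inserted
//    between the assistant message and the user message.
*/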

View File

@@ -0,0 +1,167 @@
import { FORMATS } from "./formats.js";
import { ensureToolCallIds, fixMissingToolResponses } from "./helpers/toolCallHelper.js";
import { prepareClaudeRequest } from "./helpers/claudeHelper.js";
import { filterToOpenAIFormat } from "./helpers/openaiHelper.js";
import { normalizeThinkingConfig } from "../services/provider.js";
// Registry for translators
const requestRegistry = new Map();
const responseRegistry = new Map();
// Register translator
export function register(from, to, requestFn, responseFn) {
const key = `${from}:${to}`;
if (requestFn) {
requestRegistry.set(key, requestFn);
}
if (responseFn) {
responseRegistry.set(key, responseFn);
}
}
// Translate request: source -> openai -> target
export function translateRequest(sourceFormat, targetFormat, model, body, stream = true, credentials = null, provider = null) {
let result = body;
// Normalize thinking config: remove if lastMessage is not user
normalizeThinkingConfig(result);
// Always ensure tool_calls have id (some providers require it)
ensureToolCallIds(result);
// Fix missing tool responses (insert empty tool_result if needed)
fixMissingToolResponses(result);
// If same format, skip translation steps
if (sourceFormat !== targetFormat) {
// Step 1: source -> openai (if source is not openai)
if (sourceFormat !== FORMATS.OPENAI) {
const toOpenAI = requestRegistry.get(`${sourceFormat}:${FORMATS.OPENAI}`);
if (toOpenAI) {
result = toOpenAI(model, result, stream, credentials);
}
}
// Step 1.5: Filter to clean OpenAI format (only when target is OpenAI)
if (targetFormat === FORMATS.OPENAI) {
result = filterToOpenAIFormat(result);
}
// Step 2: openai -> target (if target is not openai)
if (targetFormat !== FORMATS.OPENAI) {
const fromOpenAI = requestRegistry.get(`${FORMATS.OPENAI}:${targetFormat}`);
if (fromOpenAI) {
result = fromOpenAI(model, result, stream, credentials);
}
}
}
// Final step: prepare request for Claude format endpoints
if (targetFormat === FORMATS.CLAUDE) {
result = prepareClaudeRequest(result, provider);
}
return result;
}
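// Illustrative usage sketch (not executed): requests hop through OpenAI as the pivot format.
// For a Claude-format client talking to a Gemini-format provider this runs claude->openai and
// then openai->gemini, plus the shared fixups above. The model name and claudeRequestBody
// variable are placeholders.
/*
const geminiBody = translateRequest(
  FORMATS.CLAUDE,      // sourceFormat (client)
  FORMATS.GEMINI,      // targetFormat (provider)
  "gemini-2.0-flash",  // hypothetical model name
  claudeRequestBody,   // incoming request body in Claude format
  true                 // stream
);
*/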
// Translate response chunk: target -> openai -> source
export function translateResponse(targetFormat, sourceFormat, chunk, state) {
// If same format, return as-is
if (sourceFormat === targetFormat) {
return [chunk];
}
let results = [chunk];
// Step 1: target -> openai (if target is not openai)
if (targetFormat !== FORMATS.OPENAI) {
const toOpenAI = responseRegistry.get(`${targetFormat}:${FORMATS.OPENAI}`);
if (toOpenAI) {
results = [];
const converted = toOpenAI(chunk, state);
if (converted) {
results = Array.isArray(converted) ? converted : [converted];
}
}
}
// Step 2: openai -> source (if source is not openai)
if (sourceFormat !== FORMATS.OPENAI) {
const fromOpenAI = responseRegistry.get(`${FORMATS.OPENAI}:${sourceFormat}`);
if (fromOpenAI) {
const finalResults = [];
for (const r of results) {
const converted = fromOpenAI(r, state);
if (converted) {
finalResults.push(...(Array.isArray(converted) ? converted : [converted]));
}
}
results = finalResults;
}
}
return results;
}
// Check if translation needed
export function needsTranslation(sourceFormat, targetFormat) {
return sourceFormat !== targetFormat;
}
// Initialize state for streaming response based on format
export function initState(sourceFormat) {
// Base state for all formats
const base = {
messageId: null,
model: null,
textBlockStarted: false,
thinkingBlockStarted: false,
inThinkingBlock: false,
currentBlockIndex: null,
toolCalls: new Map(),
finishReason: null,
finishReasonSent: false,
usage: null,
contentBlockIndex: -1
};
// Add openai-responses specific fields
if (sourceFormat === FORMATS.OPENAI_RESPONSES) {
return {
...base,
seq: 0,
responseId: `resp_${Date.now()}`,
created: Math.floor(Date.now() / 1000),
started: false,
msgTextBuf: {},
msgItemAdded: {},
msgContentAdded: {},
msgItemDone: {},
reasoningId: "",
reasoningIndex: -1,
reasoningBuf: "",
reasoningPartAdded: false,
reasoningDone: false,
inThinking: false,
funcArgsBuf: {},
funcNames: {},
funcCallIds: {},
funcArgsDone: {},
funcItemDone: {},
completedSent: false
};
}
return base;
}
// Initialize all translators
export async function initTranslators() {
await import("./to-openai/claude.js");
await import("./to-openai/gemini.js");
await import("./to-openai/openai.js");
await import("./to-openai/openai-responses.js");
await import("./from-openai/claude.js");
await import("./from-openai/gemini.js");
await import("./from-openai/openai-responses.js");
}

View File

@@ -0,0 +1,239 @@
import { register } from "../index.js";
import { FORMATS } from "../formats.js";
import { adjustMaxTokens } from "../helpers/maxTokensHelper.js";
// Convert Claude request to OpenAI format
function claudeToOpenAI(model, body, stream) {
const result = {
model: model,
messages: [],
stream: stream
};
// Max tokens
if (body.max_tokens) {
result.max_tokens = adjustMaxTokens(body);
}
// Temperature
if (body.temperature !== undefined) {
result.temperature = body.temperature;
}
// System message
if (body.system) {
const systemContent = Array.isArray(body.system)
? body.system.map(s => s.text || "").join("\n")
: body.system;
if (systemContent) {
result.messages.push({
role: "system",
content: systemContent
});
}
}
// Convert messages
if (body.messages && Array.isArray(body.messages)) {
for (let i = 0; i < body.messages.length; i++) {
const msg = body.messages[i];
const converted = convertClaudeMessage(msg);
if (converted) {
// Handle array of messages (multiple tool results)
if (Array.isArray(converted)) {
result.messages.push(...converted);
} else {
result.messages.push(converted);
}
}
}
}
// Fix missing tool responses - OpenAI requires every tool_call to have a response
fixMissingToolResponses(result.messages);
// Tools
if (body.tools && Array.isArray(body.tools)) {
result.tools = body.tools.map(tool => ({
type: "function",
function: {
name: tool.name,
description: tool.description,
parameters: tool.input_schema || { type: "object", properties: {} }
}
}));
}
// Tool choice
if (body.tool_choice) {
result.tool_choice = convertToolChoice(body.tool_choice);
}
return result;
}
// Fix missing tool responses - add empty responses for tool_calls without responses
function fixMissingToolResponses(messages) {
for (let i = 0; i < messages.length; i++) {
const msg = messages[i];
if (msg.role === "assistant" && msg.tool_calls && msg.tool_calls.length > 0) {
const toolCallIds = msg.tool_calls.map(tc => tc.id);
// Collect all tool response IDs that IMMEDIATELY follow this assistant message
// Stop at any non-tool message (user or assistant)
const respondedIds = new Set();
let insertPosition = i + 1;
for (let j = i + 1; j < messages.length; j++) {
const nextMsg = messages[j];
if (nextMsg.role === "tool" && nextMsg.tool_call_id) {
respondedIds.add(nextMsg.tool_call_id);
insertPosition = j + 1;
} else {
// Stop at any non-tool message (user or assistant)
break;
}
}
// Find missing responses and insert them
const missingIds = toolCallIds.filter(id => !respondedIds.has(id));
if (missingIds.length > 0) {
const missingResponses = missingIds.map(id => ({
role: "tool",
tool_call_id: id,
content: "[No response received]"
}));
// Insert missing responses at the correct position
messages.splice(insertPosition, 0, ...missingResponses);
// Adjust index to skip inserted messages
i = insertPosition + missingResponses.length - 1;
}
}
}
}
// Convert single Claude message - returns single message or array of messages
function convertClaudeMessage(msg) {
const role = msg.role === "user" || msg.role === "tool" ? "user" : "assistant";
// Simple string content
if (typeof msg.content === "string") {
return { role, content: msg.content };
}
// Array content
if (Array.isArray(msg.content)) {
const parts = [];
const toolCalls = [];
const toolResults = [];
for (const block of msg.content) {
switch (block.type) {
case "text":
parts.push({ type: "text", text: block.text });
break;
case "image":
if (block.source?.type === "base64") {
parts.push({
type: "image_url",
image_url: {
url: `data:${block.source.media_type};base64,${block.source.data}`
}
});
}
break;
case "tool_use":
toolCalls.push({
id: block.id,
type: "function",
function: {
name: block.name,
arguments: JSON.stringify(block.input || {})
}
});
break;
case "tool_result":
// Extract actual content from tool_result
let resultContent = "";
if (typeof block.content === "string") {
resultContent = block.content;
} else if (Array.isArray(block.content)) {
// Claude tool_result content can be array of text blocks
resultContent = block.content
.filter(c => c.type === "text")
.map(c => c.text)
.join("\n") || JSON.stringify(block.content);
} else if (block.content) {
resultContent = JSON.stringify(block.content);
}
toolResults.push({
role: "tool",
tool_call_id: block.tool_use_id,
content: resultContent
});
break;
}
}
// If has tool results, return array of tool messages
if (toolResults.length > 0) {
// Also include text parts as user message if any
if (parts.length > 0) {
const textContent = parts.length === 1 && parts[0].type === "text"
? parts[0].text
: parts;
return [...toolResults, { role: "user", content: textContent }];
}
return toolResults;
}
// If has tool calls, return assistant message with tool_calls
if (toolCalls.length > 0) {
const result = { role: "assistant" };
if (parts.length > 0) {
result.content = parts.length === 1 && parts[0].type === "text"
? parts[0].text
: parts;
}
result.tool_calls = toolCalls;
return result;
}
// Return content
if (parts.length > 0) {
return {
role,
content: parts.length === 1 && parts[0].type === "text" ? parts[0].text : parts
};
}
// Empty content array - return empty string content to keep message in conversation
if (msg.content.length === 0) {
return { role, content: "" };
}
}
return null;
}
// Convert tool choice
function convertToolChoice(choice) {
if (!choice) return "auto";
if (typeof choice === "string") return choice;
switch (choice.type) {
case "auto": return "auto";
case "any": return "required";
case "tool": return { type: "function", function: { name: choice.name } };
default: return "auto";
}
}
// Register
register(FORMATS.CLAUDE, FORMATS.OPENAI, claudeToOpenAI, null);

View File

@@ -0,0 +1,154 @@
import { register } from "../index.js";
import { FORMATS } from "../formats.js";
import { adjustMaxTokens } from "../helpers/maxTokensHelper.js";
// Convert Gemini request to OpenAI format
function geminiToOpenAI(model, body, stream) {
const result = {
model: model,
messages: [],
stream: stream
};
// Generation config
if (body.generationConfig) {
const config = body.generationConfig;
if (config.maxOutputTokens) {
// Create temporary body object for adjustMaxTokens
const tempBody = { max_tokens: config.maxOutputTokens, tools: body.tools };
result.max_tokens = adjustMaxTokens(tempBody);
}
if (config.temperature !== undefined) {
result.temperature = config.temperature;
}
if (config.topP !== undefined) {
result.top_p = config.topP;
}
}
// System instruction
if (body.systemInstruction) {
const systemText = extractGeminiText(body.systemInstruction);
if (systemText) {
result.messages.push({
role: "system",
content: systemText
});
}
}
// Convert contents to messages
if (body.contents && Array.isArray(body.contents)) {
for (const content of body.contents) {
const converted = convertGeminiContent(content);
if (converted) {
result.messages.push(converted);
}
}
}
// Tools
if (body.tools && Array.isArray(body.tools)) {
result.tools = [];
for (const tool of body.tools) {
if (tool.functionDeclarations) {
for (const func of tool.functionDeclarations) {
result.tools.push({
type: "function",
function: {
name: func.name,
description: func.description || "",
parameters: func.parameters || { type: "object", properties: {} }
}
});
}
}
}
}
return result;
}
// Convert Gemini content to OpenAI message
function convertGeminiContent(content) {
const role = content.role === "user" ? "user" : "assistant";
if (!content.parts || !Array.isArray(content.parts)) {
return null;
}
const parts = [];
const toolCalls = [];
for (const part of content.parts) {
// Text
if (part.text !== undefined) {
parts.push({ type: "text", text: part.text });
}
// Image
if (part.inlineData) {
parts.push({
type: "image_url",
image_url: {
url: `data:${part.inlineData.mimeType};base64,${part.inlineData.data}`
}
});
}
// Function call
if (part.functionCall) {
toolCalls.push({
id: `call_${Date.now()}_${Math.random().toString(36).slice(2, 8)}`,
type: "function",
function: {
name: part.functionCall.name,
arguments: JSON.stringify(part.functionCall.args || {})
}
});
}
// Function response - use id if available, fallback to name
if (part.functionResponse) {
return {
role: "tool",
tool_call_id: part.functionResponse.id || part.functionResponse.name,
content: JSON.stringify(part.functionResponse.response?.result || part.functionResponse.response || {})
};
}
}
// Has tool calls
if (toolCalls.length > 0) {
const result = { role: "assistant" };
if (parts.length > 0) {
result.content = parts.length === 1 && parts[0].type === "text" ? parts[0].text : parts;
}
result.tool_calls = toolCalls;
return result;
}
// Regular message
if (parts.length > 0) {
return {
role,
content: parts.length === 1 && parts[0].type === "text" ? parts[0].text : parts
};
}
return null;
}
// Extract text from Gemini content
function extractGeminiText(content) {
if (typeof content === "string") return content;
if (content.parts && Array.isArray(content.parts)) {
return content.parts.map(p => p.text || "").join("");
}
return "";
}
// Register
register(FORMATS.GEMINI, FORMATS.OPENAI, geminiToOpenAI, null);
register(FORMATS.GEMINI_CLI, FORMATS.OPENAI, geminiToOpenAI, null);

View File

@@ -0,0 +1,140 @@
/**
* Translator: OpenAI Responses API → OpenAI Chat Completions
*
* Responses API uses: { input: [...], instructions: "..." }
* Chat API uses: { messages: [...] }
*/
import { register } from "../index.js";
import { FORMATS } from "../formats.js";
/**
* Convert OpenAI Responses API request to OpenAI Chat Completions format
*/
function translateRequest(model, body, stream, credentials) {
if (!body.input) return body;
const result = { ...body };
result.messages = [];
// Convert instructions to system message
if (body.instructions) {
result.messages.push({ role: "system", content: body.instructions });
}
// Group items by conversation turn
let currentAssistantMsg = null;
let pendingToolResults = [];
for (const item of body.input) {
if (item.type === "message") {
// Flush any pending assistant message with tool calls
if (currentAssistantMsg) {
result.messages.push(currentAssistantMsg);
currentAssistantMsg = null;
}
// Flush pending tool results
if (pendingToolResults.length > 0) {
for (const tr of pendingToolResults) {
result.messages.push(tr);
}
pendingToolResults = [];
}
// Convert content: input_text → text, output_text → text
const content = Array.isArray(item.content)
? item.content.map(c => {
if (c.type === "input_text") return { type: "text", text: c.text };
if (c.type === "output_text") return { type: "text", text: c.text };
return c;
})
: item.content;
result.messages.push({ role: item.role, content });
}
else if (item.type === "function_call") {
// Start or append to assistant message with tool_calls
if (!currentAssistantMsg) {
currentAssistantMsg = {
role: "assistant",
content: null,
tool_calls: []
};
}
currentAssistantMsg.tool_calls.push({
id: item.call_id,
type: "function",
function: {
name: item.name,
arguments: item.arguments
}
});
}
else if (item.type === "function_call_output") {
// Flush assistant message first if exists
if (currentAssistantMsg) {
result.messages.push(currentAssistantMsg);
currentAssistantMsg = null;
}
// Flush any pending tool results first
if (pendingToolResults.length > 0) {
for (const tr of pendingToolResults) {
result.messages.push(tr);
}
pendingToolResults = [];
}
// Add tool result immediately (not pending)
result.messages.push({
role: "tool",
tool_call_id: item.call_id,
content: typeof item.output === "string" ? item.output : JSON.stringify(item.output)
});
}
else if (item.type === "reasoning") {
// Skip reasoning items - they are for display only
continue;
}
}
// Flush remaining
if (currentAssistantMsg) {
result.messages.push(currentAssistantMsg);
}
if (pendingToolResults.length > 0) {
for (const tr of pendingToolResults) {
result.messages.push(tr);
}
}
// Tools are already in OpenAI format, just keep them
// Responses API tools: { type: "function", name, description, parameters }
// OpenAI tools: { type: "function", function: { name, description, parameters } }
if (body.tools && Array.isArray(body.tools)) {
result.tools = body.tools.map(tool => {
// Already has function wrapper
if (tool.function) return tool;
// Responses API format: flatten to OpenAI format
return {
type: "function",
function: {
name: tool.name,
description: tool.description,
parameters: tool.parameters,
strict: tool.strict
}
};
});
}
// Cleanup Responses API specific fields
delete result.input;
delete result.instructions;
delete result.include;
delete result.prompt_cache_key;
delete result.store;
delete result.reasoning;
return result;
}
// Register translator
register(FORMATS.OPENAI_RESPONSES, FORMATS.OPENAI, translateRequest, null);

View File

@@ -0,0 +1,372 @@
import { register } from "../index.js";
import { FORMATS } from "../formats.js";
import { CLAUDE_SYSTEM_PROMPT } from "../../config/constants.js";
import { adjustMaxTokens } from "../helpers/maxTokensHelper.js";
// Convert OpenAI request to Claude format
function openaiToClaude(model, body, stream) {
const result = {
model: model,
max_tokens: adjustMaxTokens(body),
stream: stream
};
// Temperature
if (body.temperature !== undefined) {
result.temperature = body.temperature;
}
// Messages
result.messages = [];
const systemParts = [];
if (body.messages && Array.isArray(body.messages)) {
// Extract system messages
for (const msg of body.messages) {
if (msg.role === "system") {
systemParts.push(typeof msg.content === "string" ? msg.content : extractTextContent(msg.content));
}
}
// Filter out system messages for separate processing
const nonSystemMessages = body.messages.filter(m => m.role !== "system");
// Process messages with merging logic
// CRITICAL: tool_result must be in a separate message immediately after tool_use
let currentRole = undefined;
let currentParts = [];
const flushCurrentMessage = () => {
if (currentRole && currentParts.length > 0) {
result.messages.push({ role: currentRole, content: currentParts });
currentParts = [];
}
};
for (const msg of nonSystemMessages) {
const newRole = (msg.role === "user" || msg.role === "tool") ? "user" : "assistant";
const blocks = getContentBlocksFromMessage(msg);
const hasToolUse = blocks.some(b => b.type === "tool_use");
const hasToolResult = blocks.some(b => b.type === "tool_result");
// Separate tool_result from other content
if (hasToolResult) {
const toolResultBlocks = blocks.filter(b => b.type === "tool_result");
const otherBlocks = blocks.filter(b => b.type !== "tool_result");
// Flush current message first
flushCurrentMessage();
// Add tool_result as separate user message
if (toolResultBlocks.length > 0) {
result.messages.push({ role: "user", content: toolResultBlocks });
}
// Add other blocks to current parts for next message
if (otherBlocks.length > 0) {
currentRole = newRole;
currentParts.push(...otherBlocks);
}
continue;
}
if (currentRole !== newRole) {
flushCurrentMessage();
currentRole = newRole;
}
currentParts.push(...blocks);
if (hasToolUse) {
flushCurrentMessage();
}
}
flushCurrentMessage();
// Add cache_control to last assistant message (like worker.old)
for (let i = result.messages.length - 1; i >= 0; i--) {
const message = result.messages[i];
if (message.role === "assistant" && Array.isArray(message.content) && message.content.length > 0) {
const lastBlock = message.content[message.content.length - 1];
if (lastBlock) {
lastBlock.cache_control = { type: "ephemeral" };
break;
}
}
}
}
// System with Claude Code prompt and cache_control
const claudeCodePrompt = { type: "text", text: CLAUDE_SYSTEM_PROMPT };
if (systemParts.length > 0) {
const systemText = systemParts.join("\n");
result.system = [
claudeCodePrompt,
{ type: "text", text: systemText, cache_control: { type: "ephemeral", ttl: "1h" } }
];
} else {
result.system = [claudeCodePrompt];
}
// Tools - convert from OpenAI format to Claude format
if (body.tools && Array.isArray(body.tools)) {
result.tools = body.tools.map(tool => {
// Handle both OpenAI format {type: "function", function: {...}} and direct format
const toolData = tool.type === "function" && tool.function ? tool.function : tool;
return {
name: toolData.name,
description: toolData.description || "",
input_schema: toolData.parameters || toolData.input_schema || { type: "object", properties: {}, required: [] }
};
});
// Add cache control to last tool (like worker.old)
if (result.tools.length > 0) {
result.tools[result.tools.length - 1].cache_control = { type: "ephemeral", ttl: "1h" };
}
// console.log("[CLAUDE TOOLS DEBUG] Converted tools:", result.tools.map(t => t.name));
}
// Tool choice
if (body.tool_choice) {
result.tool_choice = convertOpenAIToolChoice(body.tool_choice);
}
return result;
}
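// Illustrative usage sketch (not executed; IDs, names, and the model label are hypothetical):
// how the merging logic splits an OpenAI tool turn into Claude messages — tool results become
// their own user message right after the assistant's tool_use.
/*
openaiToClaude("claude-sonnet", {
  messages: [
    { role: "user", content: "List files" },
    { role: "assistant", tool_calls: [{ id: "call_1", type: "function",
        function: { name: "ls", arguments: "{}" } }] },
    { role: "tool", tool_call_id: "call_1", content: "a.txt" }
  ]
}, true);
// => messages: user[text], assistant[tool_use], user[tool_result]
*/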
// Convert OpenAI request to Gemini format
function openaiToGemini(model, body, stream) {
const result = {
contents: [],
generationConfig: {}
};
// Generation config
if (body.max_tokens) {
result.generationConfig.maxOutputTokens = body.max_tokens;
}
if (body.temperature !== undefined) {
result.generationConfig.temperature = body.temperature;
}
if (body.top_p !== undefined) {
result.generationConfig.topP = body.top_p;
}
// Messages
if (body.messages && Array.isArray(body.messages)) {
for (const msg of body.messages) {
if (msg.role === "system") {
result.systemInstruction = {
parts: [{ text: typeof msg.content === "string" ? msg.content : extractTextContent(msg.content) }]
};
} else if (msg.role === "tool") {
result.contents.push({
role: "function",
parts: [{
functionResponse: {
name: msg.tool_call_id,
response: tryParseJSON(msg.content)
}
}]
});
} else {
const converted = convertOpenAIToGeminiContent(msg);
if (converted) {
result.contents.push(converted);
}
}
}
}
// Tools
if (body.tools && Array.isArray(body.tools)) {
const validTools = body.tools.filter(tool => tool && tool.function && tool.function.name);
if (validTools.length > 0) {
result.tools = [{
functionDeclarations: validTools.map(tool => ({
name: tool.function.name,
description: tool.function.description || "",
parameters: tool.function.parameters || { type: "object", properties: {} }
}))
}];
}
}
return result;
}
// Get content blocks from single message (like src.cc getContentBlocksFromMessage)
function getContentBlocksFromMessage(msg) {
const blocks = [];
if (msg.role === "tool") {
blocks.push({
type: "tool_result",
tool_use_id: msg.tool_call_id,
content: msg.content
});
} else if (msg.role === "user") {
if (typeof msg.content === "string") {
if (msg.content) {
blocks.push({ type: "text", text: msg.content });
}
} else if (Array.isArray(msg.content)) {
for (const part of msg.content) {
if (part.type === "text" && part.text) {
blocks.push({ type: "text", text: part.text });
} else if (part.type === "tool_result") {
blocks.push({
type: "tool_result",
tool_use_id: part.tool_use_id,
content: part.content,
...(part.is_error && { is_error: part.is_error })
});
} else if (part.type === "image_url") {
const url = part.image_url.url;
const match = url.match(/^data:([^;]+);base64,(.+)$/);
if (match) {
blocks.push({
type: "image",
source: { type: "base64", media_type: match[1], data: match[2] }
});
}
} else if (part.type === "image" && part.source) {
blocks.push({ type: "image", source: part.source });
}
}
}
} else if (msg.role === "assistant") {
// Handle Anthropic format: content is array with tool_use blocks
if (Array.isArray(msg.content)) {
for (const part of msg.content) {
if (part.type === "text" && part.text) {
blocks.push({ type: "text", text: part.text });
} else if (part.type === "tool_use") {
blocks.push({ type: "tool_use", id: part.id, name: part.name, input: part.input });
}
}
} else if (msg.content) {
const text = typeof msg.content === "string" ? msg.content : extractTextContent(msg.content);
if (text) {
blocks.push({ type: "text", text });
}
}
// Handle OpenAI format: tool_calls array
if (msg.tool_calls && Array.isArray(msg.tool_calls)) {
for (const tc of msg.tool_calls) {
if (tc.type === "function") {
blocks.push({
type: "tool_use",
id: tc.id,
name: tc.function.name,
input: tryParseJSON(tc.function.arguments)
});
}
}
}
}
return blocks;
}
// Convert single OpenAI message to Claude format (for backward compatibility)
function convertOpenAIMessage(msg) {
const role = msg.role === "assistant" ? "assistant" : "user";
const content = convertOpenAIMessageContent(msg);
if (content.length === 0) return null;
return { role, content };
}
// Convert OpenAI message to Gemini content
function convertOpenAIToGeminiContent(msg) {
const role = msg.role === "assistant" ? "model" : "user";
const parts = [];
// Text content
if (typeof msg.content === "string") {
if (msg.content) {
parts.push({ text: msg.content });
}
} else if (Array.isArray(msg.content)) {
for (const part of msg.content) {
if (part.type === "text") {
parts.push({ text: part.text });
} else if (part.type === "image_url") {
const url = part.image_url.url;
if (url.startsWith("data:")) {
const match = url.match(/^data:([^;]+);base64,(.+)$/);
if (match) {
parts.push({
inlineData: {
mimeType: match[1],
data: match[2]
}
});
}
}
}
}
}
// Tool calls
if (msg.tool_calls && Array.isArray(msg.tool_calls)) {
for (const tc of msg.tool_calls) {
parts.push({
functionCall: {
name: tc.function.name,
args: tryParseJSON(tc.function.arguments)
}
});
}
}
if (parts.length === 0) return null;
return { role, parts };
}
// Convert tool choice
function convertOpenAIToolChoice(choice) {
if (!choice) return { type: "auto" };
// Passthrough if already Claude format
if (typeof choice === "object" && choice.type) return choice;
if (choice === "auto" || choice === "none") return { type: "auto" };
if (choice === "required") return { type: "any" };
if (typeof choice === "object" && choice.function) {
return { type: "tool", name: choice.function.name };
}
return { type: "auto" };
}
// Extract text from content
function extractTextContent(content) {
if (typeof content === "string") return content;
if (Array.isArray(content)) {
return content.filter(c => c.type === "text").map(c => c.text).join("\n");
}
return "";
}
// Try parse JSON
function tryParseJSON(str) {
if (typeof str !== "string") return str;
try {
return JSON.parse(str);
} catch {
return str;
}
}
// Register
register(FORMATS.OPENAI, FORMATS.CLAUDE, openaiToClaude, null);
register(FORMATS.OPENAI, FORMATS.GEMINI, openaiToGemini, null);
register(FORMATS.OPENAI, FORMATS.GEMINI_CLI, openaiToGemini, null);

View File

@@ -0,0 +1,266 @@
import { detectFormat } from "../services/provider.js";
import { translateResponse, initState } from "../translator/index.js";
import { FORMATS } from "../translator/formats.js";
import { SKIP_PATTERNS } from "../config/constants.js";
import { formatSSE } from "./stream.js";
/**
* Check for bypass patterns (warmup, skip) - return fake response without calling provider
* Supports both streaming and non-streaming responses
* Returns response in the correct sourceFormat using translator
*
* @param {object} body - Request body
* @param {string} model - Model name
* @returns {object|null} { success: true, response: Response } or null if not bypass
*/
export function handleBypassRequest(body, model) {
const messages = body.messages;
if (!messages?.length) return null;
// Helper to extract text from content
const getText = (content) => {
if (typeof content === "string") return content;
if (Array.isArray(content)) {
return content.filter(c => c.type === "text").map(c => c.text).join(" ");
}
return "";
};
let shouldBypass = false;
// Check warmup: first message "Warmup"
const firstText = getText(messages[0]?.content);
if (firstText === "Warmup") shouldBypass = true;
// Check count pattern: [{"role":"user","content":"count"}]
if (!shouldBypass &&
messages.length === 1 &&
messages[0]?.role === "user" &&
firstText === "count") {
shouldBypass = true;
}
// Check skip patterns
if (!shouldBypass && SKIP_PATTERNS?.length) {
const allText = messages.map(m => getText(m.content)).join(" ");
shouldBypass = SKIP_PATTERNS.some(p => allText.includes(p));
}
if (!shouldBypass) return null;
// Detect source format and stream mode
const sourceFormat = detectFormat(body);
const stream = body.stream !== false;
// Create bypass response using translator
if (stream) {
return createStreamingResponse(sourceFormat, model);
} else {
return createNonStreamingResponse(sourceFormat, model);
}
}
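// Illustrative usage sketch (not executed; the model name is hypothetical): a "Warmup" probe
// short-circuits before any provider call and is answered in the detected client format.
/*
const bypass = handleBypassRequest(
  { messages: [{ role: "user", content: "Warmup" }], stream: false },
  "gpt-4o"
);
if (bypass) {
  return bypass.response; // canned response in the client's format, no upstream request
}
*/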
/**
* Create OpenAI standard format response
*/
function createOpenAIResponse(model) {
const id = `chatcmpl-${Date.now()}`;
const created = Math.floor(Date.now() / 1000);
const text = "CLI Command Execution: Clear Terminal";
return {
id,
object: "chat.completion",
created,
model,
choices: [{
index: 0,
message: {
role: "assistant",
content: text
},
finish_reason: "stop"
}],
usage: {
prompt_tokens: 1,
completion_tokens: 1,
total_tokens: 2
}
};
}
/**
* Create non-streaming response with translation
* Use translator to convert OpenAI → sourceFormat
*/
function createNonStreamingResponse(sourceFormat, model) {
const openaiResponse = createOpenAIResponse(model);
// If sourceFormat is OpenAI, return directly
if (sourceFormat === FORMATS.OPENAI) {
return {
success: true,
response: new Response(JSON.stringify(openaiResponse), {
headers: {
"Content-Type": "application/json",
"Access-Control-Allow-Origin": "*"
}
})
};
}
// Use translator to convert: simulate streaming then collect all chunks
const state = initState(sourceFormat);
state.model = model;
const openaiChunks = createOpenAIStreamingChunks(openaiResponse);
const allTranslated = [];
for (const chunk of openaiChunks) {
const translated = translateResponse(FORMATS.OPENAI, sourceFormat, chunk, state);
if (translated?.length > 0) {
allTranslated.push(...translated);
}
}
// Flush remaining
const flushed = translateResponse(FORMATS.OPENAI, sourceFormat, null, state);
if (flushed?.length > 0) {
allTranslated.push(...flushed);
}
// For non-streaming, merge all chunks into final response
const finalResponse = mergeChunksToResponse(allTranslated, sourceFormat);
return {
success: true,
response: new Response(JSON.stringify(finalResponse), {
headers: {
"Content-Type": "application/json",
"Access-Control-Allow-Origin": "*"
}
})
};
}
/**
* Create streaming response with translation
* Use translator to convert OpenAI chunks → sourceFormat
*/
function createStreamingResponse(sourceFormat, model) {
const openaiResponse = createOpenAIResponse(model);
const state = initState(sourceFormat);
state.model = model;
// Create OpenAI streaming chunks
const openaiChunks = createOpenAIStreamingChunks(openaiResponse);
// Translate each chunk to sourceFormat using translator
const translatedChunks = [];
for (const chunk of openaiChunks) {
const translated = translateResponse(FORMATS.OPENAI, sourceFormat, chunk, state);
if (translated?.length > 0) {
for (const item of translated) {
translatedChunks.push(formatSSE(item, sourceFormat));
}
}
}
// Flush remaining events
const flushed = translateResponse(FORMATS.OPENAI, sourceFormat, null, state);
if (flushed?.length > 0) {
for (const item of flushed) {
translatedChunks.push(formatSSE(item, sourceFormat));
}
}
// Add [DONE]
translatedChunks.push("data: [DONE]\n\n");
return {
success: true,
response: new Response(translatedChunks.join(""), {
headers: {
"Content-Type": "text/event-stream",
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"Access-Control-Allow-Origin": "*"
}
})
};
}
/**
* Merge translated chunks into final response object (for non-streaming)
* Takes the last complete chunk as the final response
*/
function mergeChunksToResponse(chunks, sourceFormat) {
if (!chunks || chunks.length === 0) {
return createOpenAIResponse("unknown");
}
// For most formats, the last chunk before done contains the complete response
// Find the most complete chunk (usually the last one with content)
let finalChunk = chunks[chunks.length - 1];
// For Claude format, find the message_stop or final message
if (sourceFormat === FORMATS.CLAUDE) {
const messageStop = chunks.find(c => c.type === "message_stop");
if (messageStop) {
// Reconstruct complete message from chunks
const contentDelta = chunks.find(c => c.type === "content_block_delta");
const messageDelta = chunks.find(c => c.type === "message_delta");
const messageStart = chunks.find(c => c.type === "message_start");
if (messageStart?.message) {
finalChunk = messageStart.message;
// Merge usage if available
if (messageDelta?.usage) {
finalChunk.usage = messageDelta.usage;
}
}
}
}
return finalChunk;
}
/**
* Create OpenAI streaming chunks from complete response
*/
function createOpenAIStreamingChunks(completeResponse) {
const { id, created, model, choices } = completeResponse;
const content = choices[0].message.content;
return [
// Chunk with content
{
id,
object: "chat.completion.chunk",
created,
model,
choices: [{
index: 0,
delta: {
role: "assistant",
content
},
finish_reason: null
}]
},
// Final chunk with finish_reason
{
id,
object: "chat.completion.chunk",
created,
model,
choices: [{
index: 0,
delta: {},
finish_reason: "stop"
}],
usage: completeResponse.usage
}
];
}

133
open-sse/utils/error.js Normal file
View File

@@ -0,0 +1,133 @@
// OpenAI-compatible error types mapping
const ERROR_TYPES = {
400: { type: "invalid_request_error", code: "bad_request" },
401: { type: "authentication_error", code: "invalid_api_key" },
403: { type: "permission_error", code: "insufficient_quota" },
404: { type: "invalid_request_error", code: "model_not_found" },
429: { type: "rate_limit_error", code: "rate_limit_exceeded" },
500: { type: "server_error", code: "internal_server_error" },
502: { type: "server_error", code: "bad_gateway" },
503: { type: "server_error", code: "service_unavailable" },
504: { type: "server_error", code: "gateway_timeout" }
};
/**
* Build OpenAI-compatible error response body
* @param {number} statusCode - HTTP status code
* @param {string} message - Error message
* @returns {object} Error response object
*/
export function buildErrorBody(statusCode, message) {
const errorInfo = ERROR_TYPES[statusCode] ||
(statusCode >= 500
? { type: "server_error", code: "internal_server_error" }
: { type: "invalid_request_error", code: "" });
return {
error: {
message: message || getDefaultMessage(statusCode),
type: errorInfo.type,
code: errorInfo.code
}
};
}
/**
* Get default error message for status code
*/
function getDefaultMessage(statusCode) {
const messages = {
400: "Bad request",
401: "Invalid API key provided",
403: "You exceeded your current quota",
404: "Model not found",
429: "Rate limit exceeded",
500: "Internal server error",
502: "Bad gateway - upstream provider error",
503: "Service temporarily unavailable",
504: "Gateway timeout"
};
return messages[statusCode] || "An error occurred";
}
/**
* Create error Response object (for non-streaming)
* @param {number} statusCode - HTTP status code
* @param {string} message - Error message
* @returns {Response} HTTP Response object
*/
export function errorResponse(statusCode, message) {
return new Response(JSON.stringify(buildErrorBody(statusCode, message)), {
status: statusCode,
headers: {
"Content-Type": "application/json",
"Access-Control-Allow-Origin": "*"
}
});
}
/**
* Write error to SSE stream (for streaming)
* @param {WritableStreamDefaultWriter} writer - Stream writer
* @param {number} statusCode - HTTP status code
* @param {string} message - Error message
*/
export async function writeStreamError(writer, statusCode, message) {
const errorBody = buildErrorBody(statusCode, message);
const encoder = new TextEncoder();
await writer.write(encoder.encode(`data: ${JSON.stringify(errorBody)}\n\n`));
}
/**
* Parse upstream provider error response
* @param {Response} response - Fetch response from provider
* @returns {Promise<{statusCode: number, message: string}>}
*/
export async function parseUpstreamError(response) {
let message = "";
try {
const text = await response.text();
// Try parse as JSON
try {
const json = JSON.parse(text);
message = json.error?.message || json.message || json.error || text;
} catch {
message = text;
}
} catch {
message = `Upstream error: ${response.status}`;
}
return {
statusCode: response.status,
message: typeof message === "string" ? message : JSON.stringify(message)
};
}
/**
* Create error result for chatCore handler
* @param {number} statusCode - HTTP status code
* @param {string} message - Error message
* @returns {{ success: false, status: number, error: string, response: Response }}
*/
export function createErrorResult(statusCode, message) {
return {
success: false,
status: statusCode,
error: message,
response: errorResponse(statusCode, message)
};
}
/**
* Format provider error with context
* @param {Error} error - Original error
* @param {string} provider - Provider name
* @param {string} model - Model name
* @returns {string} Formatted error message
*/
export function formatProviderError(error, provider, model) {
return error.message || "Unknown error";
}
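// Illustrative usage sketch (not executed; providerUrl/init are placeholders): turning an
// upstream failure into an OpenAI-compatible error for the client.
/*
const upstream = await fetch(providerUrl, init);
if (!upstream.ok) {
  const { statusCode, message } = await parseUpstreamError(upstream);
  return errorResponse(statusCode, message);
  // e.g. for 429 the body is { error: { message, type: "rate_limit_error", code: "rate_limit_exceeded" } }
}
*/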

View File

@@ -0,0 +1,82 @@
// Transform OpenAI SSE stream to Ollama JSON lines format
export function transformToOllama(response, model) {
let buffer = "";
let pendingToolCalls = {};
const transform = new TransformStream({
transform(chunk, controller) {
const text = new TextDecoder().decode(chunk);
buffer += text;
const lines = buffer.split("\n");
buffer = lines.pop() || "";
for (const line of lines) {
if (!line.startsWith("data:")) continue;
const data = line.slice(5).trim();
if (data === "[DONE]") {
const ollamaEnd = JSON.stringify({ model, message: { role: "assistant", content: "" }, done: true }) + "\n";
controller.enqueue(new TextEncoder().encode(ollamaEnd));
return;
}
try {
const parsed = JSON.parse(data);
const delta = parsed.choices?.[0]?.delta || {};
const content = delta.content || "";
const toolCalls = delta.tool_calls;
if (toolCalls) {
for (const tc of toolCalls) {
const idx = tc.index;
if (!pendingToolCalls[idx]) {
pendingToolCalls[idx] = { id: tc.id, function: { name: "", arguments: "" } };
}
if (tc.function?.name) pendingToolCalls[idx].function.name += tc.function.name;
if (tc.function?.arguments) pendingToolCalls[idx].function.arguments += tc.function.arguments;
}
}
if (content) {
const ollama = JSON.stringify({ model, message: { role: "assistant", content }, done: false }) + "\n";
controller.enqueue(new TextEncoder().encode(ollama));
}
const finishReason = parsed.choices?.[0]?.finish_reason;
if (finishReason === "tool_calls" || finishReason === "stop") {
const toolCallsArr = Object.values(pendingToolCalls);
if (toolCallsArr.length > 0) {
const formattedCalls = toolCallsArr.map(tc => ({
function: {
name: tc.function.name,
arguments: JSON.parse(tc.function.arguments || "{}")
}
}));
const ollama = JSON.stringify({
model,
message: { role: "assistant", content: "", tool_calls: formattedCalls },
done: true
}) + "\n";
controller.enqueue(new TextEncoder().encode(ollama));
pendingToolCalls = {};
} else if (finishReason === "stop") {
const ollamaEnd = JSON.stringify({ model, message: { role: "assistant", content: "" }, done: true }) + "\n";
controller.enqueue(new TextEncoder().encode(ollamaEnd));
}
}
} catch (e) {
// Silently ignore parse errors
}
}
},
flush(controller) {
const ollamaEnd = JSON.stringify({ model, message: { role: "assistant", content: "" }, done: true }) + "\n";
controller.enqueue(new TextEncoder().encode(ollamaEnd));
}
});
return new Response(response.body.pipeThrough(transform), {
headers: { "Content-Type": "application/x-ndjson", "Access-Control-Allow-Origin": "*" }
});
}
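// Illustrative usage sketch (not executed; the URL, init, and model label are placeholders):
// wrapping an upstream OpenAI SSE response so an Ollama-style client can consume
// newline-delimited JSON.
/*
const upstream = await fetch(openaiCompatibleUrl, init);
return transformToOllama(upstream, "llama3");
*/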

View File

@@ -0,0 +1,244 @@
// Check if running in Node.js environment (has fs module)
const isNode = typeof process !== "undefined" && process.versions?.node && typeof window === "undefined";
// Check if logging is enabled via environment variable (default: false)
const LOGGING_ENABLED = typeof process !== "undefined" && process.env?.ENABLE_REQUEST_LOGS === 'true';
let fs = null;
let path = null;
let LOGS_DIR = null;
// Lazy load Node.js modules (avoid top-level await)
async function ensureNodeModules() {
if (!isNode || !LOGGING_ENABLED || fs) return;
try {
fs = await import("fs");
path = await import("path");
LOGS_DIR = path.join(typeof process !== "undefined" && process.cwd ? process.cwd() : ".", "logs");
} catch {
// Running in non-Node environment (Worker, Browser, etc.)
}
}
// Format timestamp for folder name: 20251228_143045
function formatTimestamp(date = new Date()) {
const pad = (n) => String(n).padStart(2, "0");
const y = date.getFullYear();
const m = pad(date.getMonth() + 1);
const d = pad(date.getDate());
const h = pad(date.getHours());
const min = pad(date.getMinutes());
const s = pad(date.getSeconds());
return `${y}${m}${d}_${h}${min}${s}`;
}
// Create log session folder: {sourceFormat}_{targetFormat}_{model}_{timestamp}
async function createLogSession(sourceFormat, targetFormat, model) {
await ensureNodeModules();
if (!fs || !LOGS_DIR) return null;
try {
if (!fs.existsSync(LOGS_DIR)) {
fs.mkdirSync(LOGS_DIR, { recursive: true });
}
const timestamp = formatTimestamp();
const safeModel = model.replace(/[/:]/g, "-");
const folderName = `${sourceFormat}_${targetFormat}_${safeModel}_${timestamp}`;
const sessionPath = path.join(LOGS_DIR, folderName);
fs.mkdirSync(sessionPath, { recursive: true });
return sessionPath;
} catch (err) {
console.log("[LOG] Failed to create log session:", err.message);
return null;
}
}
// Write JSON file
function writeJsonFile(sessionPath, filename, data) {
if (!fs || !sessionPath) return;
try {
const filePath = path.join(sessionPath, filename);
fs.writeFileSync(filePath, JSON.stringify(data, null, 2));
} catch (err) {
console.log(`[LOG] Failed to write ${filename}:`, err.message);
}
}
// Mask sensitive data in headers
function maskSensitiveHeaders(headers) {
if (!headers) return {};
const masked = { ...headers };
const sensitiveKeys = ["authorization", "x-api-key", "cookie", "token"];
for (const key of Object.keys(masked)) {
const lowerKey = key.toLowerCase();
if (sensitiveKeys.some(sk => lowerKey.includes(sk))) {
const value = masked[key];
if (value && value.length > 20) {
masked[key] = value.slice(0, 10) + "..." + value.slice(-5);
}
}
}
return masked;
}
// No-op logger when logging is disabled (same method set as the real logger below)
function createNoOpLogger() {
return {
sessionPath: null,
logClientRawRequest() {},
logRawRequest() {},
logFormatInfo() {},
logConvertedRequest() {},
logProviderResponse() {},
appendProviderChunk() {},
logConvertedResponse() {},
appendConvertedChunk() {},
logError() {}
};
}
/**
* Create a new log session and return logger functions
* @param {string} sourceFormat - Source format from client (claude, openai, etc.)
* @param {string} targetFormat - Target format to provider (antigravity, gemini-cli, etc.)
* @param {string} model - Model name
* @returns {Promise<object>} Promise that resolves to logger object with methods to log each stage
*/
export async function createRequestLogger(sourceFormat, targetFormat, model) {
// Return no-op logger if logging is disabled
if (!LOGGING_ENABLED) {
return createNoOpLogger();
}
// Wait for session to be created before returning logger
const sessionPath = await createLogSession(sourceFormat, targetFormat, model);
return {
get sessionPath() { return sessionPath; },
// 0. Log client raw request (before any conversion)
logClientRawRequest(endpoint, body, headers = {}) {
writeJsonFile(sessionPath, "0_client_raw_request.json", {
timestamp: new Date().toISOString(),
endpoint,
headers: maskSensitiveHeaders(headers),
body
});
},
// 1. Log raw request from client (after initial conversion like responsesApi)
logRawRequest(body, headers = {}) {
writeJsonFile(sessionPath, "1_raw_request.json", {
timestamp: new Date().toISOString(),
headers: maskSensitiveHeaders(headers),
body
});
},
// 1a. Log format detection info
logFormatInfo(info) {
writeJsonFile(sessionPath, "1a_format_info.json", {
timestamp: new Date().toISOString(),
...info
});
},
// 2. Log converted request to send to provider
logConvertedRequest(url, headers, body) {
writeJsonFile(sessionPath, "2_converted_request.json", {
timestamp: new Date().toISOString(),
url,
headers: maskSensitiveHeaders(headers),
body
});
},
// 3. Log provider response (for non-streaming or error)
logProviderResponse(status, statusText, headers, body) {
const filename = "3_provider_response.json";
writeJsonFile(sessionPath, filename, {
timestamp: new Date().toISOString(),
status,
statusText,
headers: headers ? (typeof headers.entries === "function" ? Object.fromEntries(headers.entries()) : headers) : {},
body
});
},
// 3. Append streaming chunk to provider response
appendProviderChunk(chunk) {
if (!fs || !sessionPath) return;
try {
const filePath = path.join(sessionPath, "3_provider_response.txt");
fs.appendFileSync(filePath, chunk);
} catch (err) {
// Ignore append errors
}
},
// 4. Log converted response to client (for non-streaming)
logConvertedResponse(body) {
writeJsonFile(sessionPath, "4_converted_response.json", {
timestamp: new Date().toISOString(),
body
});
},
// 4. Append streaming chunk to converted response
appendConvertedChunk(chunk) {
if (!fs || !sessionPath) return;
try {
const filePath = path.join(sessionPath, "4_converted_response.txt");
fs.appendFileSync(filePath, chunk);
} catch (err) {
// Ignore append errors
}
},
// 5. Log error
logError(error, requestBody = null) {
writeJsonFile(sessionPath, "5_error.json", {
timestamp: new Date().toISOString(),
error: error?.message || String(error),
stack: error?.stack,
requestBody
});
}
};
}
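// Usage sketch (illustrative only; the call site and surrounding variables below
// are assumptions, not part of this module):
//
//   const reqLogger = await createRequestLogger("claude", "gemini-cli", "gemini-2.5-pro");
//   reqLogger.logClientRawRequest("/v1/messages", clientBody, clientHeaders);
//   reqLogger.logConvertedRequest(providerUrl, providerHeaders, convertedBody);
//   reqLogger.appendProviderChunk(rawSseChunk);      // raw provider stream
//   reqLogger.appendConvertedChunk(translatedChunk); // stream sent back to client
//   reqLogger.logError(err, convertedBody);          // on failure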
// Legacy functions for backward compatibility
export function logRequest() {}
export function logResponse() {}
export function logError(provider, { error, url, model, requestBody }) {
if (!fs || !LOGS_DIR) return;
try {
if (!fs.existsSync(LOGS_DIR)) {
fs.mkdirSync(LOGS_DIR, { recursive: true });
}
const date = new Date().toISOString().split("T")[0];
const logPath = path.join(LOGS_DIR, `${provider}-${date}.log`);
const logEntry = {
timestamp: new Date().toISOString(),
type: "error",
provider,
model,
url,
error: error?.message || String(error),
stack: error?.stack,
requestBody
};
fs.appendFileSync(logPath, JSON.stringify(logEntry) + "\n");
} catch (err) {
console.log("[LOG] Failed to write error log:", err.message);
}
}

278
open-sse/utils/stream.js Normal file
View File

@@ -0,0 +1,278 @@
import { translateResponse, initState } from "../translator/index.js";
import { FORMATS } from "../translator/formats.js";
// Get HH:MM timestamp
function getTimeString() {
return new Date().toLocaleTimeString("en-US", { hour12: false, hour: "2-digit", minute: "2-digit" });
}
// Extract usage from any format (Claude, OpenAI, Gemini)
function extractUsage(chunk) {
// Claude format (message_delta event)
if (chunk.type === "message_delta" && chunk.usage) {
return {
prompt_tokens: chunk.usage.input_tokens || 0,
completion_tokens: chunk.usage.output_tokens || 0,
cache_read_input_tokens: chunk.usage.cache_read_input_tokens,
cache_creation_input_tokens: chunk.usage.cache_creation_input_tokens
};
}
// OpenAI format
if (chunk.usage?.prompt_tokens !== undefined) {
return {
prompt_tokens: chunk.usage.prompt_tokens,
completion_tokens: chunk.usage.completion_tokens || 0,
cached_tokens: chunk.usage.prompt_tokens_details?.cached_tokens,
reasoning_tokens: chunk.usage.completion_tokens_details?.reasoning_tokens
};
}
// Gemini format
if (chunk.usageMetadata) {
return {
prompt_tokens: chunk.usageMetadata.promptTokenCount || 0,
completion_tokens: chunk.usageMetadata.candidatesTokenCount || 0,
reasoning_tokens: chunk.usageMetadata.thoughtsTokenCount
};
}
return null;
}
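// Example (illustrative): a Gemini usageMetadata chunk maps to the normalized shape:
//   extractUsage({ usageMetadata: { promptTokenCount: 12, candidatesTokenCount: 34 } })
//   // => { prompt_tokens: 12, completion_tokens: 34, reasoning_tokens: undefined }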
// ANSI color codes
export const COLORS = {
reset: "\x1b[0m",
red: "\x1b[31m",
green: "\x1b[32m",
yellow: "\x1b[33m",
blue: "\x1b[34m",
cyan: "\x1b[36m"
};
// Log usage with cache info (green color)
function logUsage(provider, usage) {
if (!usage) return;
const p = provider?.toUpperCase() || "UNKNOWN";
const inTokens = usage.prompt_tokens || 0;
const outTokens = usage.completion_tokens || 0;
let msg = `[${getTimeString()}] 📊 [USAGE] ${p} | in=${inTokens} | out=${outTokens}`;
if (usage.cache_creation_input_tokens) msg += ` | cache_write=${usage.cache_creation_input_tokens}`;
if (usage.cache_read_input_tokens) msg += ` | cache_read=${usage.cache_read_input_tokens}`;
if (usage.cached_tokens) msg += ` | cached=${usage.cached_tokens}`;
if (usage.reasoning_tokens) msg += ` | reasoning=${usage.reasoning_tokens}`;
console.log(`${COLORS.green}${msg}${COLORS.reset}`);
}
// Parse SSE data line
function parseSSELine(line) {
if (!line || !line.startsWith("data:")) return null;
const data = line.slice(5).trim();
if (data === "[DONE]") return { done: true };
try {
return JSON.parse(data);
} catch (error) {
// Log parse errors for debugging incomplete chunks
if (data.length > 0 && data.length < 1000) {
console.log(`[WARN] Failed to parse SSE line (${data.length} chars): ${data.substring(0, 100)}...`);
}
return null;
}
}
/**
* Format output as SSE
* @param {object} data - Data to format
* @param {string} sourceFormat - Client-facing format (determines whether an event prefix is emitted)
* @returns {string} SSE formatted string
*/
export function formatSSE(data, sourceFormat) {
if (data.done) return "data: [DONE]\n\n";
// OpenAI Responses API format: has event field
if (data.event && data.data) {
return `event: ${data.event}\ndata: ${JSON.stringify(data.data)}\n\n`;
}
// Claude format: include event prefix
if (sourceFormat === FORMATS.CLAUDE && data.type) {
return `event: ${data.type}\ndata: ${JSON.stringify(data)}\n\n`;
}
return `data: ${JSON.stringify(data)}\n\n`;
}
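// Examples (illustrative):
//   formatSSE({ type: "content_block_delta", delta: { type: "text_delta", text: "hi" } }, FORMATS.CLAUDE)
//   // => "event: content_block_delta\ndata: {...}\n\n"
//   formatSSE({ done: true }, FORMATS.CLAUDE)
//   // => "data: [DONE]\n\n"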
/**
* Stream modes
*/
const STREAM_MODE = {
TRANSLATE: "translate", // Full translation between formats
PASSTHROUGH: "passthrough" // No translation, normalize output, extract usage
};
/**
* Create unified SSE transform stream
* @param {object} options
* @param {string} options.mode - Stream mode: translate, passthrough
* @param {string} options.targetFormat - Provider format (for translate mode)
* @param {string} options.sourceFormat - Client format (for translate mode)
* @param {string} options.provider - Provider name
* @param {object} options.reqLogger - Request logger instance
*/
export function createSSEStream(options = {}) {
const {
mode = STREAM_MODE.TRANSLATE,
targetFormat,
sourceFormat,
provider = null,
reqLogger = null
} = options;
const decoder = new TextDecoder();
const encoder = new TextEncoder();
let buffer = "";
let usage = null;
// State for translate mode
const state = mode === STREAM_MODE.TRANSLATE ? { ...initState(sourceFormat), provider } : null;
return new TransformStream({
transform(chunk, controller) {
const text = decoder.decode(chunk, { stream: true });
buffer += text;
reqLogger?.appendProviderChunk?.(text);
const lines = buffer.split("\n");
buffer = lines.pop() || "";
for (const line of lines) {
const trimmed = line.trim();
// Passthrough mode: normalize and forward
if (mode === STREAM_MODE.PASSTHROUGH) {
if (trimmed.startsWith("data:") && trimmed.slice(5).trim() !== "[DONE]") {
try {
const parsed = JSON.parse(trimmed.slice(5).trim());
const extracted = extractUsage(parsed);
if (extracted) usage = extracted;
} catch {}
}
// Normalize: ensure "data: " has space
let output;
if (line.startsWith("data:") && !line.startsWith("data: ")) {
output = "data: " + line.slice(5) + "\n";
} else {
output = line + "\n";
}
reqLogger?.appendConvertedChunk?.(output);
controller.enqueue(encoder.encode(output));
continue;
}
// Translate mode
if (!trimmed) continue;
const parsed = parseSSELine(trimmed);
if (!parsed) continue;
if (parsed.done) {
const output = "data: [DONE]\n\n";
reqLogger?.appendConvertedChunk?.(output);
controller.enqueue(encoder.encode(output));
continue;
}
// Extract usage
const extracted = extractUsage(parsed);
if (extracted) state.usage = extracted;
// Translate and emit
const translated = translateResponse(targetFormat, sourceFormat, parsed, state);
if (translated?.length > 0) {
for (const item of translated) {
const output = formatSSE(item, sourceFormat);
reqLogger?.appendConvertedChunk?.(output);
controller.enqueue(encoder.encode(output));
}
}
}
},
flush(controller) {
try {
const remaining = decoder.decode();
if (remaining) buffer += remaining;
if (mode === STREAM_MODE.PASSTHROUGH) {
if (buffer) {
let output = buffer;
if (buffer.startsWith("data:") && !buffer.startsWith("data: ")) {
output = "data: " + buffer.slice(5);
}
reqLogger?.appendConvertedChunk?.(output);
controller.enqueue(encoder.encode(output));
}
if (usage) logUsage(provider, usage);
return;
}
// Translate mode: process remaining buffer
if (buffer.trim()) {
const parsed = parseSSELine(buffer.trim());
if (parsed && !parsed.done) {
const translated = translateResponse(targetFormat, sourceFormat, parsed, state);
if (translated?.length > 0) {
for (const item of translated) {
const output = formatSSE(item, sourceFormat);
reqLogger?.appendConvertedChunk?.(output);
controller.enqueue(encoder.encode(output));
}
}
}
}
// Flush remaining events (only once at stream end)
const flushed = translateResponse(targetFormat, sourceFormat, null, state);
if (flushed?.length > 0) {
for (const item of flushed) {
const output = formatSSE(item, sourceFormat);
reqLogger?.appendConvertedChunk?.(output);
controller.enqueue(encoder.encode(output));
}
}
// Send [DONE] and log usage
const doneOutput = "data: [DONE]\n\n";
reqLogger?.appendConvertedChunk?.(doneOutput);
controller.enqueue(encoder.encode(doneOutput));
if (state?.usage) logUsage(state.provider || targetFormat, state.usage);
} catch (error) {
console.log("Error in flush:", error);
}
}
});
}
// Convenience functions for backward compatibility
export function createSSETransformStreamWithLogger(targetFormat, sourceFormat, provider = null, reqLogger = null) {
return createSSEStream({
mode: STREAM_MODE.TRANSLATE,
targetFormat,
sourceFormat,
provider,
reqLogger
});
}
export function createPassthroughStreamWithLogger(provider = null, reqLogger = null) {
return createSSEStream({
mode: STREAM_MODE.PASSTHROUGH,
provider,
reqLogger
});
}
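// Usage sketch (illustrative; the provider fetch and the targetFormat identifier
// are assumptions outside this module):
//
//   const transform = createSSEStream({
//     mode: "translate",
//     targetFormat: providerFormat,   // format the provider emits
//     sourceFormat: FORMATS.CLAUDE,   // format the client expects back
//     provider: "claude",
//     reqLogger
//   });
//   return new Response(providerResponse.body.pipeThrough(transform), {
//     headers: { "Content-Type": "text/event-stream" }
//   });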

View File

@@ -0,0 +1,131 @@
// Stream handler with disconnect detection - shared for all providers
// Get HH:MM timestamp
function getTimeString() {
return new Date().toLocaleTimeString("en-US", { hour12: false, hour: "2-digit", minute: "2-digit" });
}
/**
* Create stream controller with abort and disconnect detection
* @param {object} options
* @param {function} options.onDisconnect - Callback when client disconnects
* @param {object} options.log - Logger instance
* @param {string} options.provider - Provider name
* @param {string} options.model - Model name
*/
export function createStreamController({ onDisconnect, log, provider, model } = {}) {
const abortController = new AbortController();
const startTime = Date.now();
let disconnected = false;
let abortTimeout = null;
const logStream = (status) => {
const duration = Date.now() - startTime;
const p = provider?.toUpperCase() || "UNKNOWN";
console.log(`[${getTimeString()}] 🌊 [STREAM] ${p} | ${model || "unknown"} | ${duration}ms | ${status}`);
};
return {
signal: abortController.signal,
startTime,
isConnected: () => !disconnected,
// Call when client disconnects
handleDisconnect: (reason = "client_closed") => {
if (disconnected) return;
disconnected = true;
logStream(`disconnect: ${reason}`);
// Delay abort to allow cleanup
abortTimeout = setTimeout(() => {
abortController.abort();
}, 500);
onDisconnect?.({ reason, duration: Date.now() - startTime });
},
// Call when stream completes normally
handleComplete: () => {
if (disconnected) return;
disconnected = true;
logStream("complete");
if (abortTimeout) {
clearTimeout(abortTimeout);
abortTimeout = null;
}
},
// Call on error
handleError: (error) => {
if (abortTimeout) {
clearTimeout(abortTimeout);
abortTimeout = null;
}
if (error.name === "AbortError") {
logStream("aborted");
return;
}
logStream(`error: ${error.message}`);
},
abort: () => abortController.abort()
};
}
/**
* Create transform stream with disconnect detection
* Wraps existing transform stream and adds abort capability
*/
export function createDisconnectAwareStream(transformStream, streamController) {
const reader = transformStream.readable.getReader();
const writer = transformStream.writable.getWriter();
return new ReadableStream({
async pull(controller) {
if (!streamController.isConnected()) {
controller.close();
return;
}
try {
const { done, value } = await reader.read();
if (done) {
streamController.handleComplete();
controller.close();
return;
}
controller.enqueue(value);
} catch (error) {
streamController.handleError(error);
controller.error(error);
}
},
cancel(reason) {
streamController.handleDisconnect(reason || "cancelled");
reader.cancel();
writer.abort();
}
});
}
/**
* Pipe provider response through transform with disconnect detection
* @param {Response} providerResponse - Response from provider
* @param {TransformStream} transformStream - Transform stream for SSE
* @param {object} streamController - Stream controller from createStreamController
*/
export function pipeWithDisconnect(providerResponse, transformStream, streamController) {
const transformedBody = providerResponse.body.pipeThrough(transformStream);
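// Adapt to createDisconnectAwareStream's { readable, writable } shape: the body is
// already piped, so a stub writable (abort only) stands in for the transform's writer.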
return createDisconnectAwareStream(
{ readable: transformedBody, writable: { getWriter: () => ({ abort: () => {} }) } },
streamController
);
}
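// Usage sketch (illustrative; the fetch call, transform stream and response wiring
// are assumptions outside this module):
//
//   const streamCtl = createStreamController({
//     provider: "claude",
//     model,
//     onDisconnect: ({ reason, duration }) => console.log("client left:", reason, duration)
//   });
//   const providerResponse = await fetch(providerUrl, { ...init, signal: streamCtl.signal });
//   const body = pipeWithDisconnect(providerResponse, transform, streamCtl);
//   return new Response(body, { headers: { "Content-Type": "text/event-stream" } });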

View File

@@ -18,7 +18,6 @@
"next": "^15.2.0",
"node-machine-id": "^1.1.12",
"open": "^10.1.0",
"open-sse": "^1.0.0",
"ora": "^5.4.1",
"react": "19.2.1",
"react-dom": "19.2.1",
@@ -26,9 +25,6 @@
"uuid": "^13.0.0",
"zustand": "^5.0.9"
},
"overrides": {
"open-sse": "file:../open-sse"
},
"devDependencies": {
"@tailwindcss/postcss": "^4.1.18",
"eslint": "^9",