Mirror of https://github.com/decolua/9router.git, synced 2026-05-08 12:01:28 +00:00
feat(codex): Cursor compatibility + Next.js 16 proxy migration
- Force streaming for Codex/OpenAI models to fix non-streaming bug
- Strip unsupported params (user, metadata, stream_options, prompt_cache_retention)
- Force response translation from openai-responses to openai format
- Migrate middleware.js to proxy.js for Next.js 16
- Use webpack explicitly in dev/build scripts
- Update Codex User-Agent
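For context, a Chat Completions request coming from Cursor can carry fields the Codex backend rejects. The payload below is a hypothetical illustration (model name and values are invented, not taken from this repo), showing the parameters this commit strips before forwarding:

    // Hypothetical example of an incoming Cursor-style request body (values illustrative only)
    const incomingBody = {
      model: "gpt-5-codex",                     // example model name, not from this repo
      messages: [{ role: "user", content: "Refactor this function" }],
      stream: false,                            // forced to true for Codex/OpenAI by this commit
      user: "cursor-user-123",                  // stripped: Codex doesn't support it
      metadata: { session: "abc" },             // stripped
      stream_options: { include_usage: true },  // stripped
      prompt_cache_retention: "24h",            // stripped
    };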
@@ -39,9 +39,9 @@ export const PROVIDERS = {
    baseUrl: "https://chatgpt.com/backend-api/codex/responses",
    format: "openai-responses", // Use OpenAI Responses API format (reuse translator)
    headers: {
-     "Version": "0.21.0",
+     "Version": "0.92.0",
      "Openai-Beta": "responses=experimental",
-     "User-Agent": "codex_cli_rs/0.50.0 (Mac OS 26.0.1; arm64)"
+     "User-Agent": "codex-cli/0.92.0 (Windows 10.0.26100; x64)"
    },
    // OpenAI OAuth configuration
    clientId: "app_EMoamEEZ73f0CkXaXp7hrann",
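As a rough sketch of how this provider entry could turn into an upstream call (not code from this repo; the access-token handling and the Responses-style body shape are assumptions based on format: "openai-responses"):

    // Hypothetical sketch: sending a Responses-API style request to the Codex backend
    async function callCodexBackend(accessToken, model, input) {
      return fetch("https://chatgpt.com/backend-api/codex/responses", {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
          "Authorization": `Bearer ${accessToken}`,   // assumed OAuth access token
          "Version": "0.92.0",
          "Openai-Beta": "responses=experimental",
          "User-Agent": "codex-cli/0.92.0 (Windows 10.0.26100; x64)",
        },
        body: JSON.stringify({ model, input, stream: true }), // assumed Responses-style body
      });
    }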
@@ -32,6 +32,11 @@ export class CodexExecutor extends BaseExecutor {
    delete body.top_logprobs;
    delete body.n;
    delete body.seed;
    delete body.max_tokens;
+   delete body.user; // Cursor sends this but Codex doesn't support it
+   delete body.prompt_cache_retention; // Cursor sends this but Codex doesn't support it
+   delete body.metadata; // Cursor sends this but Codex doesn't support it
+   delete body.stream_options; // Cursor sends this but Codex doesn't support it
+
    return body;
  }
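The same stripping pattern could be factored into a small helper. This is only a sketch; the helper name and constant are hypothetical, not part of the repo:

    // Hypothetical helper: remove params the Codex backend rejects before forwarding
    const UNSUPPORTED_CODEX_PARAMS = [
      "top_logprobs", "n", "seed", "max_tokens",
      "user", "prompt_cache_retention", "metadata", "stream_options",
    ];

    function stripUnsupportedParams(body) {
      for (const key of UNSUPPORTED_CODEX_PARAMS) {
        delete body[key];
      }
      return body;
    }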
@@ -253,11 +253,13 @@ export async function handleChatCore({ body, modelInfo, credentials, log, onCred
  const alias = PROVIDER_ID_TO_ALIAS[provider] || provider;
  const modelTargetFormat = getModelTargetFormat(alias, model);
  const targetFormat = modelTargetFormat || getTargetFormat(provider);
- const stream = body.stream !== false;
+
+ // Force streaming for OpenAI/Codex models (they don't support non-streaming mode properly)
+ const stream = (provider === 'openai' || provider === 'codex') ? true : (body.stream !== false);

  // Create request logger for this session: sourceFormat_targetFormat_model
  const reqLogger = await createRequestLogger(sourceFormat, targetFormat, model);

  // 0. Log client raw request (before any conversion)
  if (clientRawRequest) {
    reqLogger.logClientRawRequest(
@@ -266,7 +268,7 @@ export async function handleChatCore({ body, modelInfo, credentials, log, onCred
      clientRawRequest.headers
    );
  }

  // 1. Log raw request from client
  reqLogger.logRawRequest(body);
@@ -275,7 +277,7 @@ export async function handleChatCore({ body, modelInfo, credentials, log, onCred
  // Translate request (pass reqLogger for intermediate logging)
  let translatedBody = body;
  translatedBody = translateRequest(sourceFormat, targetFormat, model, body, stream, credentials, provider, reqLogger);

  // Extract toolNameMap for response translation (Claude OAuth)
  const toolNameMap = translatedBody._toolNameMap;
  delete translatedBody._toolNameMap;
@@ -290,11 +292,11 @@ export async function handleChatCore({ body, modelInfo, credentials, log, onCred
  trackPendingRequest(model, provider, connectionId, true);

  // Log start
- appendRequestLog({ model, provider, connectionId, status: "PENDING" }).catch(() => {});
+ appendRequestLog({ model, provider, connectionId, status: "PENDING" }).catch(() => { });

- const msgCount = translatedBody.messages?.length
-   || translatedBody.contents?.length
-   || translatedBody.request?.contents?.length
+ const msgCount = translatedBody.messages?.length
+   || translatedBody.contents?.length
+   || translatedBody.request?.contents?.length
    || 0;
  log?.debug?.("REQUEST", `${provider.toUpperCase()} | ${model} | ${msgCount} msgs`);

@@ -316,18 +318,18 @@ export async function handleChatCore({ body, modelInfo, credentials, log, onCred
      signal: streamController.signal,
      log
    });

    providerResponse = result.response;
    providerUrl = result.url;
    providerHeaders = result.headers;
    finalBody = result.transformedBody;

    // Log target request (final request to provider)
    reqLogger.logTargetRequest(providerUrl, providerHeaders, finalBody);

  } catch (error) {
    trackPendingRequest(model, provider, connectionId, false);
-   appendRequestLog({ model, provider, connectionId, status: `FAILED ${error.name === "AbortError" ? 499 : 502}` }).catch(() => {});
+   appendRequestLog({ model, provider, connectionId, status: `FAILED ${error.name === "AbortError" ? 499 : 502}` }).catch(() => { });
    if (error.name === "AbortError") {
      streamController.handleError(error);
      return createErrorResult(499, "Request aborted");
@@ -347,7 +349,7 @@ export async function handleChatCore({ body, modelInfo, credentials, log, onCred

    if (newCredentials?.accessToken || newCredentials?.copilotToken) {
      log?.info?.("TOKEN", `${provider.toUpperCase()} | refreshed`);

      // Update credentials
      Object.assign(credentials, newCredentials);

@@ -383,16 +385,16 @@ export async function handleChatCore({ body, modelInfo, credentials, log, onCred
  if (!providerResponse.ok) {
    trackPendingRequest(model, provider, connectionId, false);
    const { statusCode, message, retryAfterMs } = await parseUpstreamError(providerResponse, provider);
-   appendRequestLog({ model, provider, connectionId, status: `FAILED ${statusCode}` }).catch(() => {});
+   appendRequestLog({ model, provider, connectionId, status: `FAILED ${statusCode}` }).catch(() => { });
    const errMsg = formatProviderError(new Error(message), provider, model, statusCode);
    console.log(`${COLORS.red}[ERROR] ${errMsg}${COLORS.reset}`);

    // Log Antigravity retry time if available
    if (retryAfterMs && provider === "antigravity") {
      const retrySeconds = Math.ceil(retryAfterMs / 1000);
      log?.debug?.("RETRY", `Antigravity quota reset in ${retrySeconds}s (${retryAfterMs}ms)`);
    }

    // Log error with full request body for debugging
    reqLogger.logError(new Error(message), finalBody || translatedBody);

@@ -411,7 +413,7 @@ export async function handleChatCore({ body, modelInfo, credentials, log, onCred

  // Log usage for non-streaming responses
  const usage = extractUsageFromResponse(responseBody, provider);
- appendRequestLog({ model, provider, connectionId, tokens: usage, status: "200 OK" }).catch(() => {});
+ appendRequestLog({ model, provider, connectionId, tokens: usage, status: "200 OK" }).catch(() => { });
  if (usage) {
    const msg = `[${new Date().toLocaleTimeString("en-US", { hour12: false, hour: "2-digit", minute: "2-digit" })}] 📊 [USAGE] ${provider.toUpperCase()} | in=${usage.prompt_tokens || 0} | out=${usage.completion_tokens || 0}${connectionId ? ` | account=${connectionId.slice(0, 8)}...` : ""}`;
    console.log(`${COLORS.green}${msg}${COLORS.reset}`);
@@ -444,7 +446,7 @@ export async function handleChatCore({ body, modelInfo, credentials, log, onCred
  }

  // Streaming response

  // Notify success - caller can clear error status if needed
  if (onRequestSuccess) {
    await onRequestSuccess();
@@ -459,8 +461,14 @@ export async function handleChatCore({ body, modelInfo, credentials, log, onCred

  // Create transform stream with logger for streaming response
  let transformStream;
- if (needsTranslation(targetFormat, sourceFormat)) {
-   transformStream = createSSETransformStreamWithLogger(targetFormat, sourceFormat, provider, reqLogger, toolNameMap, model, connectionId);
+ // For Codex provider, always translate response from openai-responses to openai format
+ // This ensures clients like Cursor get the expected chat completions format
+ const needsCodexTranslation = (provider === 'codex' || provider === 'openai') && targetFormat === 'openai-responses';
+ if (needsCodexTranslation || needsTranslation(targetFormat, sourceFormat)) {
+   // For Codex, translate FROM openai-responses TO openai (client's expected format)
+   const responseSourceFormat = needsCodexTranslation ? 'openai-responses' : targetFormat;
+   const responseTargetFormat = needsCodexTranslation ? 'openai' : sourceFormat;
+   transformStream = createSSETransformStreamWithLogger(responseSourceFormat, responseTargetFormat, provider, reqLogger, toolNameMap, model, connectionId);
  } else {
    transformStream = createPassthroughStreamWithLogger(provider, reqLogger, model, connectionId);
  }
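The translation branch above converts an openai-responses SSE stream back into Chat Completions chunks for the client. A minimal sketch of that mapping, assuming the public OpenAI event and chunk shapes (this is not the repo's translator, only one event type is handled):

    // Hypothetical sketch: map one Responses-API stream event to a Chat Completions chunk
    function responsesEventToChatChunk(event, model) {
      if (event.type !== "response.output_text.delta") return null; // ignore other event types here
      return {
        id: event.item_id,
        object: "chat.completion.chunk",
        created: Math.floor(Date.now() / 1000),
        model,
        choices: [{ index: 0, delta: { content: event.delta }, finish_reason: null }],
      };
    }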
package.json
@@ -4,8 +4,8 @@
  "description": "9Router web dashboard",
  "private": true,
  "scripts": {
-   "dev": "next dev",
-   "build": "next build",
+   "dev": "next dev --webpack",
+   "build": "next build --webpack",
    "start": "next start"
  },
  "dependencies": {
@@ -18,21 +18,21 @@
    "jose": "^6.1.3",
    "lowdb": "^7.0.1",
    "monaco-editor": "^0.55.1",
-   "next": "^15.2.0",
+   "next": "^16.1.6",
    "node-machine-id": "^1.1.12",
-   "open": "^10.1.0",
-   "ora": "^5.4.1",
-   "react": "19.2.1",
-   "react-dom": "19.2.1",
-   "socks-proxy-agent": "^8.0.4",
-   "undici": "^7.16.0",
+   "open": "^11.0.0",
+   "ora": "^9.1.0",
+   "react": "19.2.4",
+   "react-dom": "19.2.4",
+   "socks-proxy-agent": "^8.0.5",
+   "undici": "^7.19.2",
    "uuid": "^13.0.0",
-   "zustand": "^5.0.9"
+   "zustand": "^5.0.10"
  },
  "devDependencies": {
    "@tailwindcss/postcss": "^4.1.18",
    "eslint": "^9",
-   "eslint-config-next": "16.0.10",
+   "eslint-config-next": "16.1.6",
    "tailwindcss": "^4"
  }
}
@@ -19,7 +19,7 @@ export default function APIPageClient({ machineId }) {
  const [showCloudModal, setShowCloudModal] = useState(false);
  const [showDisableModal, setShowDisableModal] = useState(false);
  const [cloudSyncing, setCloudSyncing] = useState(false);
- const [setCloudStatus] = useState(null);
+ const [cloudStatus, setCloudStatus] = useState(null);
  const [syncStep, setSyncStep] = useState(""); // "syncing" | "verifying" | "disabling" | ""

  const { copied, copy } = useCopyToClipboard();
@@ -5,7 +5,7 @@ const SECRET = new TextEncoder().encode(
  process.env.JWT_SECRET || "9router-default-secret-change-me"
);

-export async function middleware(request) {
+export async function proxy(request) {
  const { pathname } = request.nextUrl;

  // Protect all dashboard routes
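For reference, a minimal sketch of what the migrated proxy.js could look like end to end, assuming the JWT check from the old middleware is kept (the protected prefix, cookie name, and redirect target are assumptions, not taken from this repo):

    // Hypothetical sketch of proxy.js (Next.js 16): guard dashboard routes with a JWT cookie
    import { NextResponse } from "next/server";
    import { jwtVerify } from "jose";

    const SECRET = new TextEncoder().encode(
      process.env.JWT_SECRET || "9router-default-secret-change-me"
    );

    export async function proxy(request) {
      const { pathname } = request.nextUrl;
      if (!pathname.startsWith("/dashboard")) return NextResponse.next(); // assumed protected prefix

      const token = request.cookies.get("token")?.value; // assumed cookie name
      try {
        await jwtVerify(token, SECRET); // throws if token is missing, expired, or invalid
        return NextResponse.next();
      } catch {
        return NextResponse.redirect(new URL("/login", request.url)); // assumed login route
      }
    }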