feat(codex): Cursor compatibility + Next.js 16 proxy migration

- Force streaming for Codex/OpenAI models to fix non-streaming bug
- Strip unsupported params (user, metadata, stream_options, prompt_cache_retention)
- Force response translation from openai-responses to openai format
- Migrate middleware.js to proxy.js for Next.js 16
- Use webpack explicitly in dev/build scripts
- Update Codex User-Agent
This commit is contained in:
decolua
2026-02-02 09:17:15 +07:00
parent 814e02ae31
commit 7b864a9dcb
7 changed files with 85 additions and 51 deletions

View File

@@ -1,7 +1,7 @@
<div align="center"> <div align="center">
<img src="./images/9router.png" alt="9Router Dashboard" width="800"/> <img src="./images/9router.png" alt="9Router Dashboard" width="800"/>
# 9Router - FREE AI Coding + Cheap Backups # 9Router - FREE AI Coding
**Use Claude, Codex, Gemini for FREE • Ultra-cheap alternatives from $0.20/1M tokens** **Use Claude, Codex, Gemini for FREE • Ultra-cheap alternatives from $0.20/1M tokens**
@@ -32,6 +32,26 @@ Stop wasting your AI subscriptions and paying full price:
--- ---
## 🔧 How It Works
```
1. Install & Start
npm install -g 9router → Dashboard opens
2. Connect Providers
Dashboard → OAuth login (Claude, Gemini...)
OR → For free providers (iFlow, Qwen, Kiro...)
OR → Add API keys (GLM, iFlow...)
3. Point Your CLI (Cursor/Cline/any tool...)
Cursor/Cline → http://localhost:20128/v1
Your Tool → 9Router → Providers
(Auto route + fallback)
```
---
## ⚡ Quick Start ## ⚡ Quick Start
**Install globally:** **Install globally:**
@@ -41,9 +61,9 @@ npm install -g 9router
9router 9router
``` ```
🎉 Dashboard opens → Connect Claude Code → Start coding! 🎉 Dashboard opens → Connect Free Providers → Start coding!
**Use in Cursor/Cline:** **Use in Claude Code/Codex/Cursor/Cline/...:**
``` ```
Endpoint: http://localhost:20128/v1 Endpoint: http://localhost:20128/v1
@@ -83,7 +103,6 @@ When subscription quota runs out, pay pennies:
### 🆓 FREE FOREVER (Fallback) ### 🆓 FREE FOREVER (Fallback)
No API key needed, unlimited:
| Provider | Top Models | Notes | | Provider | Top Models | Notes |
|----------|-----------|-------| |----------|-----------|-------|
@@ -128,6 +147,19 @@ Tier 3 (FREE): iFlow → Qwen → Kiro
--- ---
## ☁️ Cloud Deployment
### Why Cloud?
Use `https://9router.com` when localhost doesn't work:
- **Cursor IDE** - doesn't support localhost
- **Mobile coding** - iPad, phone access
- **No install needed** - use from anywhere
- **Globally fast** - Cloudflare edge network (300+ locations)
---
## 📖 Setup Guide ## 📖 Setup Guide
<details> <details>
@@ -472,17 +504,6 @@ Daily routine:
→ Code 24/7 with minimal extra cost! → Code 24/7 with minimal extra cost!
``` ```
### Model Selection Guide
| Task | Best Model | Cost | Why |
|------|-----------|------|-----|
| Complex reasoning | `cc/claude-opus-4-5` | Subscription | Best quality |
| Fast coding | `cx/gpt-5.2-codex` | Subscription | Fastest Codex |
| Budget coding | `glm/glm-4.7` | $0.6/1M | Daily quota |
| Long context | `minimax/MiniMax-M2.1` | $0.20/1M | 1M context cheap |
| Emergency backup | `if/kimi-k2-thinking` | FREE | Unlimited |
---
## 📊 Available Models ## 📊 Available Models

View File

@@ -39,9 +39,9 @@ export const PROVIDERS = {
baseUrl: "https://chatgpt.com/backend-api/codex/responses", baseUrl: "https://chatgpt.com/backend-api/codex/responses",
format: "openai-responses", // Use OpenAI Responses API format (reuse translator) format: "openai-responses", // Use OpenAI Responses API format (reuse translator)
headers: { headers: {
"Version": "0.21.0", "Version": "0.92.0",
"Openai-Beta": "responses=experimental", "Openai-Beta": "responses=experimental",
"User-Agent": "codex_cli_rs/0.50.0 (Mac OS 26.0.1; arm64)" "User-Agent": "codex-cli/0.92.0 (Windows 10.0.26100; x64)"
}, },
// OpenAI OAuth configuration // OpenAI OAuth configuration
clientId: "app_EMoamEEZ73f0CkXaXp7hrann", clientId: "app_EMoamEEZ73f0CkXaXp7hrann",

View File

@@ -32,6 +32,11 @@ export class CodexExecutor extends BaseExecutor {
delete body.top_logprobs; delete body.top_logprobs;
delete body.n; delete body.n;
delete body.seed; delete body.seed;
delete body.max_tokens;
delete body.user; // Cursor sends this but Codex doesn't support it
delete body.prompt_cache_retention; // Cursor sends this but Codex doesn't support it
delete body.metadata; // Cursor sends this but Codex doesn't support it
delete body.stream_options; // Cursor sends this but Codex doesn't support it
return body; return body;
} }

View File

@@ -253,11 +253,13 @@ export async function handleChatCore({ body, modelInfo, credentials, log, onCred
const alias = PROVIDER_ID_TO_ALIAS[provider] || provider; const alias = PROVIDER_ID_TO_ALIAS[provider] || provider;
const modelTargetFormat = getModelTargetFormat(alias, model); const modelTargetFormat = getModelTargetFormat(alias, model);
const targetFormat = modelTargetFormat || getTargetFormat(provider); const targetFormat = modelTargetFormat || getTargetFormat(provider);
const stream = body.stream !== false;
// Force streaming for OpenAI/Codex models (they don't support non-streaming mode properly)
const stream = (provider === 'openai' || provider === 'codex') ? true : (body.stream !== false);
// Create request logger for this session: sourceFormat_targetFormat_model // Create request logger for this session: sourceFormat_targetFormat_model
const reqLogger = await createRequestLogger(sourceFormat, targetFormat, model); const reqLogger = await createRequestLogger(sourceFormat, targetFormat, model);
// 0. Log client raw request (before any conversion) // 0. Log client raw request (before any conversion)
if (clientRawRequest) { if (clientRawRequest) {
reqLogger.logClientRawRequest( reqLogger.logClientRawRequest(
@@ -266,7 +268,7 @@ export async function handleChatCore({ body, modelInfo, credentials, log, onCred
clientRawRequest.headers clientRawRequest.headers
); );
} }
// 1. Log raw request from client // 1. Log raw request from client
reqLogger.logRawRequest(body); reqLogger.logRawRequest(body);
@@ -275,7 +277,7 @@ export async function handleChatCore({ body, modelInfo, credentials, log, onCred
// Translate request (pass reqLogger for intermediate logging) // Translate request (pass reqLogger for intermediate logging)
let translatedBody = body; let translatedBody = body;
translatedBody = translateRequest(sourceFormat, targetFormat, model, body, stream, credentials, provider, reqLogger); translatedBody = translateRequest(sourceFormat, targetFormat, model, body, stream, credentials, provider, reqLogger);
// Extract toolNameMap for response translation (Claude OAuth) // Extract toolNameMap for response translation (Claude OAuth)
const toolNameMap = translatedBody._toolNameMap; const toolNameMap = translatedBody._toolNameMap;
delete translatedBody._toolNameMap; delete translatedBody._toolNameMap;
@@ -290,11 +292,11 @@ export async function handleChatCore({ body, modelInfo, credentials, log, onCred
trackPendingRequest(model, provider, connectionId, true); trackPendingRequest(model, provider, connectionId, true);
// Log start // Log start
appendRequestLog({ model, provider, connectionId, status: "PENDING" }).catch(() => {}); appendRequestLog({ model, provider, connectionId, status: "PENDING" }).catch(() => { });
const msgCount = translatedBody.messages?.length const msgCount = translatedBody.messages?.length
|| translatedBody.contents?.length || translatedBody.contents?.length
|| translatedBody.request?.contents?.length || translatedBody.request?.contents?.length
|| 0; || 0;
log?.debug?.("REQUEST", `${provider.toUpperCase()} | ${model} | ${msgCount} msgs`); log?.debug?.("REQUEST", `${provider.toUpperCase()} | ${model} | ${msgCount} msgs`);
@@ -316,18 +318,18 @@ export async function handleChatCore({ body, modelInfo, credentials, log, onCred
signal: streamController.signal, signal: streamController.signal,
log log
}); });
providerResponse = result.response; providerResponse = result.response;
providerUrl = result.url; providerUrl = result.url;
providerHeaders = result.headers; providerHeaders = result.headers;
finalBody = result.transformedBody; finalBody = result.transformedBody;
// Log target request (final request to provider) // Log target request (final request to provider)
reqLogger.logTargetRequest(providerUrl, providerHeaders, finalBody); reqLogger.logTargetRequest(providerUrl, providerHeaders, finalBody);
} catch (error) { } catch (error) {
trackPendingRequest(model, provider, connectionId, false); trackPendingRequest(model, provider, connectionId, false);
appendRequestLog({ model, provider, connectionId, status: `FAILED ${error.name === "AbortError" ? 499 : 502}` }).catch(() => {}); appendRequestLog({ model, provider, connectionId, status: `FAILED ${error.name === "AbortError" ? 499 : 502}` }).catch(() => { });
if (error.name === "AbortError") { if (error.name === "AbortError") {
streamController.handleError(error); streamController.handleError(error);
return createErrorResult(499, "Request aborted"); return createErrorResult(499, "Request aborted");
@@ -347,7 +349,7 @@ export async function handleChatCore({ body, modelInfo, credentials, log, onCred
if (newCredentials?.accessToken || newCredentials?.copilotToken) { if (newCredentials?.accessToken || newCredentials?.copilotToken) {
log?.info?.("TOKEN", `${provider.toUpperCase()} | refreshed`); log?.info?.("TOKEN", `${provider.toUpperCase()} | refreshed`);
// Update credentials // Update credentials
Object.assign(credentials, newCredentials); Object.assign(credentials, newCredentials);
@@ -383,16 +385,16 @@ export async function handleChatCore({ body, modelInfo, credentials, log, onCred
if (!providerResponse.ok) { if (!providerResponse.ok) {
trackPendingRequest(model, provider, connectionId, false); trackPendingRequest(model, provider, connectionId, false);
const { statusCode, message, retryAfterMs } = await parseUpstreamError(providerResponse, provider); const { statusCode, message, retryAfterMs } = await parseUpstreamError(providerResponse, provider);
appendRequestLog({ model, provider, connectionId, status: `FAILED ${statusCode}` }).catch(() => {}); appendRequestLog({ model, provider, connectionId, status: `FAILED ${statusCode}` }).catch(() => { });
const errMsg = formatProviderError(new Error(message), provider, model, statusCode); const errMsg = formatProviderError(new Error(message), provider, model, statusCode);
console.log(`${COLORS.red}[ERROR] ${errMsg}${COLORS.reset}`); console.log(`${COLORS.red}[ERROR] ${errMsg}${COLORS.reset}`);
// Log Antigravity retry time if available // Log Antigravity retry time if available
if (retryAfterMs && provider === "antigravity") { if (retryAfterMs && provider === "antigravity") {
const retrySeconds = Math.ceil(retryAfterMs / 1000); const retrySeconds = Math.ceil(retryAfterMs / 1000);
log?.debug?.("RETRY", `Antigravity quota reset in ${retrySeconds}s (${retryAfterMs}ms)`); log?.debug?.("RETRY", `Antigravity quota reset in ${retrySeconds}s (${retryAfterMs}ms)`);
} }
// Log error with full request body for debugging // Log error with full request body for debugging
reqLogger.logError(new Error(message), finalBody || translatedBody); reqLogger.logError(new Error(message), finalBody || translatedBody);
@@ -411,7 +413,7 @@ export async function handleChatCore({ body, modelInfo, credentials, log, onCred
// Log usage for non-streaming responses // Log usage for non-streaming responses
const usage = extractUsageFromResponse(responseBody, provider); const usage = extractUsageFromResponse(responseBody, provider);
appendRequestLog({ model, provider, connectionId, tokens: usage, status: "200 OK" }).catch(() => {}); appendRequestLog({ model, provider, connectionId, tokens: usage, status: "200 OK" }).catch(() => { });
if (usage) { if (usage) {
const msg = `[${new Date().toLocaleTimeString("en-US", { hour12: false, hour: "2-digit", minute: "2-digit" })}] 📊 [USAGE] ${provider.toUpperCase()} | in=${usage.prompt_tokens || 0} | out=${usage.completion_tokens || 0}${connectionId ? ` | account=${connectionId.slice(0, 8)}...` : ""}`; const msg = `[${new Date().toLocaleTimeString("en-US", { hour12: false, hour: "2-digit", minute: "2-digit" })}] 📊 [USAGE] ${provider.toUpperCase()} | in=${usage.prompt_tokens || 0} | out=${usage.completion_tokens || 0}${connectionId ? ` | account=${connectionId.slice(0, 8)}...` : ""}`;
console.log(`${COLORS.green}${msg}${COLORS.reset}`); console.log(`${COLORS.green}${msg}${COLORS.reset}`);
@@ -444,7 +446,7 @@ export async function handleChatCore({ body, modelInfo, credentials, log, onCred
} }
// Streaming response // Streaming response
// Notify success - caller can clear error status if needed // Notify success - caller can clear error status if needed
if (onRequestSuccess) { if (onRequestSuccess) {
await onRequestSuccess(); await onRequestSuccess();
@@ -459,8 +461,14 @@ export async function handleChatCore({ body, modelInfo, credentials, log, onCred
// Create transform stream with logger for streaming response // Create transform stream with logger for streaming response
let transformStream; let transformStream;
if (needsTranslation(targetFormat, sourceFormat)) { // For Codex provider, always translate response from openai-responses to openai format
transformStream = createSSETransformStreamWithLogger(targetFormat, sourceFormat, provider, reqLogger, toolNameMap, model, connectionId); // This ensures clients like Cursor get the expected chat completions format
const needsCodexTranslation = (provider === 'codex' || provider === 'openai') && targetFormat === 'openai-responses';
if (needsCodexTranslation || needsTranslation(targetFormat, sourceFormat)) {
// For Codex, translate FROM openai-responses TO openai (client's expected format)
const responseSourceFormat = needsCodexTranslation ? 'openai-responses' : targetFormat;
const responseTargetFormat = needsCodexTranslation ? 'openai' : sourceFormat;
transformStream = createSSETransformStreamWithLogger(responseSourceFormat, responseTargetFormat, provider, reqLogger, toolNameMap, model, connectionId);
} else { } else {
transformStream = createPassthroughStreamWithLogger(provider, reqLogger, model, connectionId); transformStream = createPassthroughStreamWithLogger(provider, reqLogger, model, connectionId);
} }

View File

@@ -4,8 +4,8 @@
"description": "9Router web dashboard", "description": "9Router web dashboard",
"private": true, "private": true,
"scripts": { "scripts": {
"dev": "next dev", "dev": "next dev --webpack",
"build": "next build", "build": "next build --webpack",
"start": "next start" "start": "next start"
}, },
"dependencies": { "dependencies": {
@@ -18,21 +18,21 @@
"jose": "^6.1.3", "jose": "^6.1.3",
"lowdb": "^7.0.1", "lowdb": "^7.0.1",
"monaco-editor": "^0.55.1", "monaco-editor": "^0.55.1",
"next": "^15.2.0", "next": "^16.1.6",
"node-machine-id": "^1.1.12", "node-machine-id": "^1.1.12",
"open": "^10.1.0", "open": "^11.0.0",
"ora": "^5.4.1", "ora": "^9.1.0",
"react": "19.2.1", "react": "19.2.4",
"react-dom": "19.2.1", "react-dom": "19.2.4",
"socks-proxy-agent": "^8.0.4", "socks-proxy-agent": "^8.0.5",
"undici": "^7.16.0", "undici": "^7.19.2",
"uuid": "^13.0.0", "uuid": "^13.0.0",
"zustand": "^5.0.9" "zustand": "^5.0.10"
}, },
"devDependencies": { "devDependencies": {
"@tailwindcss/postcss": "^4.1.18", "@tailwindcss/postcss": "^4.1.18",
"eslint": "^9", "eslint": "^9",
"eslint-config-next": "16.0.10", "eslint-config-next": "16.1.6",
"tailwindcss": "^4" "tailwindcss": "^4"
} }
} }

View File

@@ -19,7 +19,7 @@ export default function APIPageClient({ machineId }) {
const [showCloudModal, setShowCloudModal] = useState(false); const [showCloudModal, setShowCloudModal] = useState(false);
const [showDisableModal, setShowDisableModal] = useState(false); const [showDisableModal, setShowDisableModal] = useState(false);
const [cloudSyncing, setCloudSyncing] = useState(false); const [cloudSyncing, setCloudSyncing] = useState(false);
const [setCloudStatus] = useState(null); const [cloudStatus, setCloudStatus] = useState(null);
const [syncStep, setSyncStep] = useState(""); // "syncing" | "verifying" | "disabling" | "" const [syncStep, setSyncStep] = useState(""); // "syncing" | "verifying" | "disabling" | ""
const { copied, copy } = useCopyToClipboard(); const { copied, copy } = useCopyToClipboard();

View File

@@ -5,7 +5,7 @@ const SECRET = new TextEncoder().encode(
process.env.JWT_SECRET || "9router-default-secret-change-me" process.env.JWT_SECRET || "9router-default-secret-change-me"
); );
export async function middleware(request) { export async function proxy(request) {
const { pathname } = request.nextUrl; const { pathname } = request.nextUrl;
// Protect all dashboard routes // Protect all dashboard routes