feat(codex): add review model quota support (#836)

This commit is contained in:
Rezky Hamid
2026-05-03 14:57:33 +07:00
committed by GitHub
parent 14ff538f2e
commit a463ee00ff
8 changed files with 153 additions and 34 deletions

View File

@@ -5,6 +5,27 @@ import { buildTtsProviderModels } from "./ttsModels.js";
// Key = alias (cc, cx, gc, qw, if, ag, gh for OAuth; id for API Key)
// Field "provider" for special cases (e.g. AntiGravity models that call different backends)
const CODEX_REVIEW_SUFFIX = "-review";
// Expand each LLM entry with a virtual "<id>-review" twin that bills against
// the separate Codex review quota. Non-LLM entries (images, etc.) and ids
// that already carry the review suffix pass through untouched.
function withCodexReviewModels(models) {
  const expanded = [];
  for (const model of models) {
    expanded.push(model);
    const isLlm = (model.type || "llm") === "llm";
    if (!isLlm || model.id.endsWith(CODEX_REVIEW_SUFFIX)) continue;
    expanded.push({
      ...model,
      id: `${model.id}${CODEX_REVIEW_SUFFIX}`,
      name: `${model.name} Review`,
      // Keep an explicit upstream mapping so executors can strip the suffix.
      upstreamModelId: model.upstreamModelId || model.id,
      quotaFamily: "review",
    });
  }
  return expanded;
}
export const PROVIDER_MODELS = {
// OAuth Providers (using alias)
cc: [ // Claude Code
@@ -15,7 +36,7 @@ export const PROVIDER_MODELS = {
{ id: "claude-sonnet-4-5-20250929", name: "Claude 4.5 Sonnet" },
{ id: "claude-haiku-4-5-20251001", name: "Claude 4.5 Haiku" },
],
cx: [ // OpenAI Codex
cx: withCodexReviewModels([ // OpenAI Codex
{ id: "gpt-5.5", name: "GPT 5.5" },
{ id: "gpt-5.4", name: "GPT 5.4" },
// GPT 5.3 Codex - all thinking levels
@@ -40,7 +61,7 @@ export const PROVIDER_MODELS = {
{ id: "gpt-5.4-image", name: "GPT 5.4 Image", type: "image", capabilities: ["text2img", "edit"], params: ["size", "quality", "background", "image_detail", "output_format"] },
{ id: "gpt-5.3-image", name: "GPT 5.3 Image", type: "image", capabilities: ["text2img", "edit"], params: ["size", "quality", "background", "image_detail", "output_format"] },
{ id: "gpt-5.2-image", name: "GPT 5.2 Image", type: "image", capabilities: ["text2img", "edit"], params: ["size", "quality", "background", "image_detail", "output_format"] },
],
]),
gc: [ // Gemini CLI
{ id: "gemini-3-flash-preview", name: "Gemini 3 Flash Preview" },
{ id: "gemini-3-pro-preview", name: "Gemini 3 Pro Preview" },
@@ -568,6 +589,22 @@ export function getModelTargetFormat(aliasOrId, modelId) {
return found?.targetFormat || null;
}
// Resolve the model id to send to the provider backend. An explicit
// upstreamModelId on the catalog entry wins; otherwise a virtual Codex
// "-review" id falls back to its base id; everything else maps to itself.
export function getModelUpstreamId(aliasOrId, modelId) {
  const entry = PROVIDER_MODELS[aliasOrId]?.find((m) => m.id === modelId);
  if (entry?.upstreamModelId) return entry.upstreamModelId;
  const isCodexReviewId =
    aliasOrId === "cx" &&
    typeof modelId === "string" &&
    modelId.endsWith(CODEX_REVIEW_SUFFIX);
  return isCodexReviewId ? modelId.slice(0, -CODEX_REVIEW_SUFFIX.length) : modelId;
}
// Quota bucket for a model: "review" for virtual Codex review models,
// "normal" for everything else (including unknown ids).
export function getModelQuotaFamily(aliasOrId, modelId) {
  const entry = PROVIDER_MODELS[aliasOrId]?.find((m) => m.id === modelId);
  return entry?.quotaFamily || "normal";
}
// OAuth providers that use short aliases (everything else: alias = id)
const OAUTH_ALIASES = {
claude: "cc",

View File

@@ -4,6 +4,7 @@ import { CODEX_DEFAULT_INSTRUCTIONS } from "../config/codexInstructions.js";
import { PROVIDERS } from "../config/providers.js";
import { normalizeResponsesInput } from "../translator/helpers/responsesApiHelper.js";
import { fetchImageAsBase64 } from "../translator/helpers/imageHelper.js";
import { getModelUpstreamId } from "../config/providerModels.js";
import { getConsistentMachineId } from "../../src/shared/utils/machineId.js";
// In-memory map: hash(machineId + first assistant content) → { sessionId, lastUsed }
@@ -175,12 +176,15 @@ export class CodexExecutor extends BaseExecutor {
// Ensure store is false (Codex requirement)
body.store = false;
// Map virtual Codex review models to the upstream Codex model before suffix parsing.
body.model = getModelUpstreamId("cx", body.model || model);
// Extract thinking level from model name suffix
// e.g., gpt-5.3-codex-high → high, gpt-5.3-codex → medium (default)
const effortLevels = ['none', 'low', 'medium', 'high', 'xhigh'];
let modelEffort = null;
for (const level of effortLevels) {
if (model.endsWith(`-${level}`)) {
if (body.model.endsWith(`-${level}`)) {
modelEffort = level;
// Strip suffix from model name for actual API call
body.model = body.model.replace(`-${level}`, '');
@@ -212,6 +216,8 @@ export class CodexExecutor extends BaseExecutor {
delete body.n;
delete body.seed;
delete body.max_tokens;
delete body.max_completion_tokens;
delete body.max_output_tokens; // Responses API clients send this but Codex rejects it
delete body.user; // Cursor sends this but Codex doesn't support it
delete body.prompt_cache_retention; // Cursor sends this but Codex doesn't support it
delete body.metadata; // Cursor sends this but Codex doesn't support it

View File

@@ -488,6 +488,70 @@ async function getClaudeUsageLegacy(accessToken, proxyOptions = null) {
/**
* Codex (OpenAI) Usage - Fetch from ChatGPT backend API
*/
// Coerce numbers and numeric strings to a finite number. NaN, ±Infinity,
// blank/unparseable strings, and any other type yield `fallback`.
function toFiniteNumber(value, fallback = 0) {
  switch (typeof value) {
    case "number":
      return Number.isFinite(value) ? value : fallback;
    case "string": {
      if (!value.trim()) return fallback;
      const parsed = Number(value);
      return Number.isFinite(parsed) ? parsed : fallback;
    }
    default:
      return fallback;
  }
}
// Normalize a usage snapshot to the object holding the rate-limit windows.
// Accepts either a { rate_limit: {...} } wrapper or a bare rate-limit
// object; returns null for null/non-object input.
function getCodexRateLimitBody(snapshot) {
  if (!snapshot || typeof snapshot !== "object" || Array.isArray(snapshot)) return null;
  const nested = snapshot.rate_limit;
  // Reject arrays here too — the top-level check already does, and
  // `typeof [] === "object"` would otherwise let one slip through.
  if (nested && typeof nested === "object" && !Array.isArray(nested)) return nested;
  return snapshot;
}
// Convert an upstream rate-limit window into the normalized quota shape:
// percent-used clamped to [0, 100] plus a parsed reset timestamp.
function formatCodexWindow(window) {
  const rawUsed = toFiniteNumber(window?.used_percent ?? window?.percent_used, 0);
  const used = Math.min(100, Math.max(0, rawUsed));
  // Upstream has shipped the reset field under several names.
  const reset = window?.reset_at ?? window?.resets_at ?? window?.resetAt ?? null;
  return {
    used,
    total: 100,
    remaining: Math.max(0, 100 - used),
    resetAt: parseResetTime(reset),
    unlimited: false,
  };
}
// Write the session/weekly quota entries derived from `snapshot` into
// `quotas`, key-prefixed by `prefix` (e.g. "review_session"; unprefixed
// when `prefix` is empty). Returns true if at least one window was found.
function appendCodexQuotaWindows(quotas, prefix, snapshot) {
  const body = getCodexRateLimitBody(snapshot);
  if (!body) return false;
  const keyFor = (suffix) => (prefix ? `${prefix}_${suffix}` : suffix);
  // Windows may live on the rate-limit body or directly on the snapshot.
  const windows = [
    ["session", body.primary_window || body.primary || snapshot.primary_window || snapshot.primary],
    ["weekly", body.secondary_window || body.secondary || snapshot.secondary_window || snapshot.secondary],
  ];
  let added = false;
  for (const [suffix, window] of windows) {
    if (!window) continue;
    quotas[keyFor(suffix)] = formatCodexWindow(window);
    added = true;
  }
  return added;
}
// Locate the code-review rate limit inside a Codex usage payload. The
// backend has shipped it under several shapes:
//   - data.code_review_rate_limit / data.review_rate_limit
//   - data.rate_limits_by_limit_id.{code_review,codex_review,review}
//   - an entry of data.additional_rate_limits whose id mentions "review"
// Returns null when none is present or `data` is not a plain object.
// Unlike the previous version, a `rate_limits_by_limit_id` object without
// review keys no longer short-circuits — we still fall back to
// `additional_rate_limits`.
function getCodexReviewRateLimit(data) {
  if (!data || typeof data !== "object" || Array.isArray(data)) return null;
  const direct = data.code_review_rate_limit || data.review_rate_limit;
  if (direct) return direct;
  const byLimitId = data.rate_limits_by_limit_id;
  if (byLimitId && typeof byLimitId === "object" && !Array.isArray(byLimitId)) {
    const keyed = byLimitId.code_review || byLimitId.codex_review || byLimitId.review;
    if (keyed) return keyed;
  }
  const additional = Array.isArray(data.additional_rate_limits) ? data.additional_rate_limits : [];
  return additional.find((entry) => {
    const id = String(entry?.limit_name || entry?.metered_feature || entry?.id || "").toLowerCase();
    // `includes` subsumes the exact ids ("code_review", "codex_review", "review").
    return id.includes("review");
  }) || null;
}
async function getCodexUsage(accessToken, proxyOptions = null) {
try {
const response = await proxyAwareFetch(CODEX_CONFIG.usageUrl, {
@@ -503,35 +567,18 @@ async function getCodexUsage(accessToken, proxyOptions = null) {
}
const data = await response.json();
const normalRateLimit = data.rate_limit || data.rate_limits || data.rate_limits_by_limit_id?.codex || {};
const reviewRateLimit = getCodexReviewRateLimit(data);
const quotas = {};
// Parse rate limit info
const rateLimit = data.rate_limit || {};
const primaryWindow = rateLimit.primary_window || {};
const secondaryWindow = rateLimit.secondary_window || {};
// Parse reset dates (reset_at is Unix timestamp in seconds, multiply by 1000 for ms)
const sessionResetAt = parseResetTime(primaryWindow.reset_at ? primaryWindow.reset_at * 1000 : null);
const weeklyResetAt = parseResetTime(secondaryWindow.reset_at ? secondaryWindow.reset_at * 1000 : null);
appendCodexQuotaWindows(quotas, "", normalRateLimit);
appendCodexQuotaWindows(quotas, "review", reviewRateLimit);
return {
plan: data.plan_type || "unknown",
limitReached: rateLimit.limit_reached || false,
quotas: {
session: {
used: primaryWindow.used_percent || 0,
total: 100,
remaining: 100 - (primaryWindow.used_percent || 0),
resetAt: sessionResetAt,
unlimited: false,
},
weekly: {
used: secondaryWindow.used_percent || 0,
total: 100,
remaining: 100 - (secondaryWindow.used_percent || 0),
resetAt: weeklyResetAt,
unlimited: false,
},
},
plan: data.plan_type || data.summary?.plan || "unknown",
limitReached: getCodexRateLimitBody(normalRateLimit)?.limit_reached || false,
reviewLimitReached: getCodexRateLimitBody(reviewRateLimit)?.limit_reached || false,
quotas,
};
} catch (error) {
throw new Error(`Failed to fetch Codex usage: ${error.message}`);

View File

@@ -36,6 +36,27 @@ const parseGeminiCliModels = (data) => {
return [];
};
// Normalize raw Codex catalog entries (ids/names arrive under several
// field names) and append a virtual "<id>-review" companion — billed
// against the review quota — for every chat model.
const appendCodexReviewModels = (models) => models.flatMap((model) => {
  const id = model?.id || model?.slug || model?.model || model?.name;
  if (!id) return [];
  const name = model?.display_name || model?.displayName || model?.name || id;
  const base = { ...model, id, name };
  // Only chat (LLM) models get a review twin: skip image and embedding
  // models, and ids that already carry the suffix.
  const skipReview =
    (model?.type || "llm") === "image" ||
    id.toLowerCase().includes("embed") ||
    id.endsWith("-review");
  if (skipReview) return [base];
  const review = {
    ...base,
    id: `${id}-review`,
    name: `${name} Review`,
    upstreamModelId: id,
    quotaFamily: "review",
  };
  return [base, review];
});
const parseCodexModels = (data) => appendCodexReviewModels(parseOpenAIStyleModels(data));
const createOpenAIModelsConfig = (url) => ({
url,
method: "GET",
@@ -84,6 +105,14 @@ const PROVIDER_MODELS_CONFIG = {
authPrefix: "Bearer ",
parseResponse: (data) => data.data || []
},
codex: {
url: "https://chatgpt.com/backend-api/codex/models?client_version=1.0.0",
method: "GET",
headers: { "Content-Type": "application/json", "Accept": "application/json" },
authHeader: "Authorization",
authPrefix: "Bearer ",
parseResponse: parseCodexModels
},
antigravity: {
url: "https://daily-cloudcode-pa.sandbox.googleapis.com/v1internal:models",
method: "POST",

View File

@@ -415,7 +415,7 @@ export async function createProviderConnection(data) {
const optionalFields = [
"displayName", "email", "globalPriority", "defaultModel",
"accessToken", "refreshToken", "expiresAt", "tokenType",
"scope", "idToken", "projectId", "apiKey", "testStatus",
"scope", "projectId", "apiKey", "testStatus",
"lastTested", "lastError", "lastErrorAt", "rateLimitedUntil", "expiresIn", "errorCode",
"consecutiveUseCount"
];
@@ -681,7 +681,7 @@ export async function cleanupProviderConnections() {
const fieldsToCheck = [
"displayName", "email", "globalPriority", "defaultModel",
"accessToken", "refreshToken", "expiresAt", "tokenType",
"scope", "idToken", "projectId", "apiKey", "testStatus",
"scope", "projectId", "apiKey", "testStatus",
"lastTested", "lastError", "lastErrorAt", "rateLimitedUntil", "expiresIn",
"consecutiveUseCount"
];

View File

@@ -172,7 +172,6 @@ const PROVIDERS = {
const mapped = {
accessToken: tokens.access_token,
refreshToken: tokens.refresh_token,
idToken: tokens.id_token,
expiresIn: tokens.expires_in,
};
if (info.email) mapped.email = info.email;

View File

@@ -53,7 +53,6 @@ export class CodexService extends OAuthService {
body: JSON.stringify({
accessToken: tokens.access_token,
refreshToken: tokens.refresh_token,
idToken: tokens.id_token,
expiresIn: tokens.expires_in,
}),
});

View File

@@ -8,7 +8,9 @@ export {
getModelTargetFormat,
getModelStrip,
PROVIDER_ID_TO_ALIAS,
getModelsByProviderId
getModelsByProviderId,
getModelUpstreamId,
getModelQuotaFamily
} from "open-sse/config/providerModels.js";
import { AI_PROVIDERS, isOpenAICompatibleProvider } from "./providers.js";