chore: add Gemini 3.1 Pro models to provider configurations

This commit is contained in:
decolua
2026-02-22 15:20:24 +07:00
parent 930e917092
commit a5eb5a864e
9 changed files with 250 additions and 158 deletions

View File

@@ -60,6 +60,8 @@ export const PROVIDER_MODELS = {
{ id: "glm-5", name: "GLM 5" },
],
ag: [ // Antigravity - special case: models call different backends
{ id: "gemini-3.1-pro-high", name: "Gemini 3.1 Pro High" },
{ id: "gemini-3.1-pro-low", name: "Gemini 3.1 Pro Low" },
{ id: "gemini-3-flash", name: "Gemini 3 Flash" },
{ id: "claude-sonnet-4-6", name: "Claude Sonnet 4.6" },
{ id: "claude-opus-4-6-thinking", name: "Claude Opus 4.6 Thinking" },

View File

@@ -32,30 +32,21 @@ if (!isCloudEnv()) {
// ConnectRPC envelope flag bits:
//   bit 0 (0x01) = payload is gzip-compressed
//   bit 1 (0x02) = trailer frame (end-of-stream status JSON, not message data)
const COMPRESS_FLAG = {
  NONE: 0x00,
  GZIP: 0x01,
  TRAILER: 0x02,
  GZIP_TRAILER: 0x03
};
function decompressPayload(payload, flags) {
// Check if payload is JSON error (starts with {"error")
if (payload.length > 10 && payload[0] === 0x7b && payload[1] === 0x22) {
try {
const text = payload.toString('utf-8');
if (text.startsWith('{"error"')) {
console.log(`[DECOMPRESS] Detected JSON error, skipping decompression`);
return payload;
}
} catch {}
// ConnectRPC trailer frame (flags & 0x02) - contains status JSON, not compressed data
if (flags & COMPRESS_FLAG.TRAILER) {
return payload;
}
if (flags === COMPRESS_FLAG.GZIP || flags === COMPRESS_FLAG.GZIP_ALT || flags === COMPRESS_FLAG.GZIP_BOTH) {
if (flags === COMPRESS_FLAG.GZIP) {
try {
return zlib.gunzipSync(payload);
} catch (err) {
console.log(`[DECOMPRESS ERROR] flags=${flags}, payloadSize=${payload.length}, error=${err.message}`);
console.log(`[DECOMPRESS ERROR] First 50 bytes (hex):`, payload.slice(0, 50).toString('hex'));
console.log(`[DECOMPRESS ERROR] First 50 bytes (utf8):`, payload.slice(0, 50).toString('utf8').replace(/[^\x20-\x7E]/g, '.'));
// Try to use payload as-is if decompression fails
return payload;
}
}
@@ -165,7 +156,6 @@ export class CursorExecutor extends BaseExecutor {
}
transformRequest(model, body, stream, credentials) {
// Call translator to convert OpenAI format to Cursor format
const translatedBody = buildCursorRequest(model, body, stream, credentials);
const messages = translatedBody.messages || [];
const tools = translatedBody.tools || body.tools || [];
@@ -292,34 +282,26 @@ export class CursorExecutor extends BaseExecutor {
const toolCallsMap = new Map(); // Track streaming tool calls by ID
let frameCount = 0;
console.log(`[CURSOR BUFFER] Total length: ${buffer.length} bytes`);
while (offset < buffer.length) {
if (offset + 5 > buffer.length) {
console.log(`[CURSOR BUFFER] Reached end, offset=${offset}, remaining=${buffer.length - offset}`);
break;
}
if (offset + 5 > buffer.length) break;
const flags = buffer[offset];
const length = buffer.readUInt32BE(offset + 1);
console.log(`[CURSOR BUFFER] Frame ${frameCount + 1}: flags=0x${flags.toString(16).padStart(2, '0')}, length=${length}`);
if (offset + 5 + length > buffer.length) {
console.log(`[CURSOR BUFFER] Incomplete frame, offset=${offset}, length=${length}, buffer.length=${buffer.length}`);
break;
}
if (offset + 5 + length > buffer.length) break;
let payload = buffer.slice(offset + 5, offset + 5 + length);
offset += 5 + length;
frameCount++;
payload = decompressPayload(payload, flags);
if (!payload) {
console.log(`[CURSOR BUFFER] Frame ${frameCount}: decompression failed, skipping`);
continue;
// Stop at ConnectRPC trailer frame (end of response, anything after is a separate response)
if (flags & COMPRESS_FLAG.TRAILER) {
break;
}
payload = decompressPayload(payload, flags);
if (!payload) continue;
try {
const text = payload.toString("utf-8");
if (text.startsWith("{") && text.includes('"error"')) {
@@ -328,7 +310,6 @@ export class CursorExecutor extends BaseExecutor {
} catch {}
const result = extractTextFromResponse(new Uint8Array(payload));
console.log(`[CURSOR DECODED] Frame ${frameCount}:`, result);
if (result.error) {
return new Response(JSON.stringify({
@@ -373,13 +354,10 @@ export class CursorExecutor extends BaseExecutor {
if (result.text) totalContent += result.text;
}
console.log(`[CURSOR BUFFER] Parsed ${frameCount} frames, toolCallsMap size: ${toolCallsMap.size}, finalized toolCalls: ${toolCalls.length}`);
// Finalize all remaining tool calls in map (in case stream ended without isLast=true)
for (const [id, tc] of toolCallsMap.entries()) {
// Check if already in final array
if (!toolCalls.find(t => t.id === id)) {
console.log(`[CURSOR BUFFER] Finalizing incomplete tool call: ${id}, isLast=${tc.isLast}`);
toolCalls.push({
id: tc.id,
type: tc.type,
@@ -391,7 +369,6 @@ export class CursorExecutor extends BaseExecutor {
}
}
console.log(`[CURSOR BUFFER] Final toolCalls count: ${toolCalls.length}`);
const message = {
role: "assistant",
@@ -434,34 +411,26 @@ export class CursorExecutor extends BaseExecutor {
const toolCallsMap = new Map(); // Track streaming tool calls by ID
let frameCount = 0;
console.log(`[CURSOR BUFFER SSE] Total length: ${buffer.length} bytes`);
while (offset < buffer.length) {
if (offset + 5 > buffer.length) {
console.log(`[CURSOR BUFFER SSE] Reached end, offset=${offset}, remaining=${buffer.length - offset}`);
break;
}
if (offset + 5 > buffer.length) break;
const flags = buffer[offset];
const length = buffer.readUInt32BE(offset + 1);
console.log(`[CURSOR BUFFER SSE] Frame ${frameCount + 1}: flags=0x${flags.toString(16).padStart(2, '0')}, length=${length}`);
if (offset + 5 + length > buffer.length) {
console.log(`[CURSOR BUFFER SSE] Incomplete frame, offset=${offset}, length=${length}, buffer.length=${buffer.length}`);
break;
}
if (offset + 5 + length > buffer.length) break;
let payload = buffer.slice(offset + 5, offset + 5 + length);
offset += 5 + length;
frameCount++;
payload = decompressPayload(payload, flags);
if (!payload) {
console.log(`[CURSOR BUFFER SSE] Frame ${frameCount}: decompression failed, skipping`);
continue;
// Stop at ConnectRPC trailer frame (end of response, anything after is a separate response)
if (flags & COMPRESS_FLAG.TRAILER) {
break;
}
payload = decompressPayload(payload, flags);
if (!payload) continue;
try {
const text = payload.toString("utf-8");
if (text.startsWith("{") && text.includes('"error"')) {
@@ -470,7 +439,6 @@ export class CursorExecutor extends BaseExecutor {
} catch {}
const result = extractTextFromResponse(new Uint8Array(payload));
console.log(`[CURSOR DECODED SSE] Frame ${frameCount}:`, result);
if (result.error) {
return new Response(JSON.stringify({
@@ -582,7 +550,6 @@ export class CursorExecutor extends BaseExecutor {
}
}
console.log(`[CURSOR BUFFER SSE] Parsed ${frameCount} frames, toolCallsMap size: ${toolCallsMap.size}, toolCalls array: ${toolCalls.length}`);
if (chunks.length === 0 && toolCalls.length === 0) {
chunks.push(`data: ${JSON.stringify({

View File

@@ -246,12 +246,10 @@ async function getAntigravityUsage(accessToken, providerSpecificData) {
// Model ids highlighted in usage reporting. Stale `gemini-3-1-pro-*` and
// `gemini-3-pro-*` entries were superseded by the `gemini-3.1-pro-*` ids.
const importantModels = [
  'claude-opus-4-6-thinking',
  'claude-sonnet-4-6',
  'gemini-3.1-pro-high',
  'gemini-3.1-pro-low',
  'gemini-3-flash',
  'gemini-2.5-flash',
  'gpt-oss-120b-medium',
];
for (const [modelKey, info] of Object.entries(data.models)) {

View File

@@ -1,97 +1,58 @@
/**
* OpenAI to Cursor Request Translator
* Converts OpenAI messages to Cursor simple format
* - assistant tool_calls → kept as-is (Cursor generates tool calls)
* - tool results → converted to user message string
*/
import { register } from "../index.js";
import { FORMATS } from "../formats.js";
/**
* Convert OpenAI messages to Cursor format with native tool_results support
* - system → user with [System Instructions] prefix
* - tool → accumulate into tool_results array for next user/assistant message
* - assistant with tool_calls → keep tool_calls structure (Cursor supports it natively)
*/
/**
 * Flatten OpenAI-style message content into a plain string.
 * Strings pass through unchanged; content-part arrays contribute only
 * their "text" parts, concatenated in order; any other value yields "".
 */
function extractContent(content) {
  if (typeof content === "string") {
    return content;
  }
  if (!Array.isArray(content)) {
    return "";
  }
  let text = "";
  for (const part of content) {
    if (part.type === "text") {
      text += part.text;
    }
  }
  return text;
}
function convertMessages(messages) {
const result = [];
let pendingToolResults = [];
for (let i = 0; i < messages.length; i++) {
const msg = messages[i];
if (msg.role === "system") {
result.push({
role: "user",
content: `[System Instructions]\n${msg.content}`
});
result.push({ role: "user", content: `[System Instructions]\n${msg.content}` });
continue;
}
if (msg.role === "user") {
result.push({ role: "user", content: extractContent(msg.content) || "" });
continue;
}
if (msg.role === "tool") {
let toolContent = "";
if (typeof msg.content === "string") {
toolContent = msg.content;
} else if (Array.isArray(msg.content)) {
for (const part of msg.content) {
if (part.type === "text") {
toolContent += part.text;
}
}
}
const toolName = msg.name || "tool";
// Strip system-reminder tags injected by Claude Code
const raw = extractContent(msg.content) || "";
const toolContent = raw.replace(/<system-reminder>[\s\S]*?<\/system-reminder>/g, "").trim();
// Find matching tool name from previous assistant message
const prevMsg = result[result.length - 1];
const toolName = prevMsg?.tool_calls?.[0]?.function?.name || "";
const toolCallId = msg.tool_call_id || "";
// Accumulate tool result
pendingToolResults.push({
tool_call_id: toolCallId,
name: toolName,
index: pendingToolResults.length,
raw_args: toolContent
result.push({
role: "user",
content: `<tool_result>\n<tool_name>${toolName}</tool_name>\n<tool_call_id>${toolCallId}</tool_call_id>\n<result>${toolContent}</result>\n</tool_result>`
});
continue;
}
if (msg.role === "user" || msg.role === "assistant") {
let content = "";
if (typeof msg.content === "string") {
content = msg.content;
} else if (Array.isArray(msg.content)) {
for (const part of msg.content) {
if (part.type === "text") {
content += part.text;
}
}
}
// Keep tool_calls structure for assistant messages
if (msg.role === "assistant" && msg.tool_calls && msg.tool_calls.length > 0) {
const assistantMsg = { role: "assistant" };
if (content) {
assistantMsg.content = content;
}
assistantMsg.tool_calls = msg.tool_calls;
// Attach pending tool results to assistant message with tool_calls
if (pendingToolResults.length > 0) {
assistantMsg.tool_results = pendingToolResults;
pendingToolResults = [];
}
result.push(assistantMsg);
} else if (content || pendingToolResults.length > 0) {
const msgObj = {
role: msg.role,
content: content || ""
};
// Attach pending tool results to this message
if (pendingToolResults.length > 0) {
msgObj.tool_results = pendingToolResults;
pendingToolResults = [];
}
result.push(msgObj);
if (msg.role === "assistant") {
const content = extractContent(msg.content) || "";
if (msg.tool_calls && msg.tool_calls.length > 0) {
// Strip `index` field — not needed in history, may confuse Cursor
const tool_calls = msg.tool_calls.map(({ index, ...tc }) => tc);
result.push({ role: "assistant", content, tool_calls });
} else if (content) {
result.push({ role: "assistant", content });
}
}
}
@@ -99,16 +60,14 @@ function convertMessages(messages) {
return result;
}
/**
 * Transform an OpenAI-format request into Cursor's request shape.
 * Converts the message history and strips OpenAI/Anthropic-only fields
 * that Cursor's backend does not understand.
 *
 * @param {string} model - target model id (kept for executor signature parity)
 * @param {object} body - original OpenAI-format request body
 * @param {boolean} stream - whether streaming was requested (unused here)
 * @param {object} credentials - provider credentials (unused here)
 * @returns {object} request body in Cursor format with a fixed max_tokens cap
 */
export function buildCursorRequest(model, body, stream, credentials) {
  const messages = convertMessages(body.messages || []);
  // Strip fields irrelevant to Cursor (OpenAI/Anthropic-specific)
  const { user, metadata, tool_choice, stream_options, system, ...rest } = body;
  return {
    ...rest,
    messages,
    max_tokens: 32000
  };
}

View File

@@ -64,6 +64,31 @@ const FIELD = {
TOOL_RESULT_INDEX: 3,
TOOL_RESULT_RAW_ARGS: 5,
TOOL_RESULT_RESULT: 8,
TOOL_RESULT_TOOL_CALL: 11,
TOOL_RESULT_MODEL_CALL_ID: 12,
// ClientSideToolV2Result
CV2R_TOOL: 1,
CV2R_MCP_RESULT: 28,
CV2R_CALL_ID: 35,
CV2R_MODEL_CALL_ID: 48,
CV2R_TOOL_INDEX: 49,
// MCPResult
MCPR_SELECTED_TOOL: 1,
MCPR_RESULT: 2,
// ClientSideToolV2Call
CV2C_TOOL: 1,
CV2C_MCP_PARAMS: 27,
CV2C_CALL_ID: 3,
CV2C_NAME: 9,
CV2C_RAW_ARGS: 10,
CV2C_TOOL_INDEX: 48,
CV2C_MODEL_CALL_ID: 49,
// ConversationMessage extra fields
MSG_SERVER_BUBBLE_ID: 32,
// Model
MODEL_NAME: 1,
@@ -175,26 +200,118 @@ function concatArrays(...arrays) {
// ==================== MESSAGE ENCODING ====================
// ClientSideToolV2 enum value used for all MCP tool traffic (MCP = 19).
// NOTE: the stale header/locals of the removed encodeToolResult
// implementation were dropped here; the current implementation lives
// further down in this file.
const CLIENT_SIDE_TOOL_V2_MCP = 19;
/**
 * Namespace a tool name for Cursor's MCP layer:
 * "toolName" -> "mcp_custom_toolName"; names already carrying the
 * "mcp_" prefix are returned untouched.
 */
function formatToolName(name) {
  return name.startsWith("mcp_") ? name : `mcp_custom_${name}`;
}
/**
 * Split a combined tool_call_id into its parts.
 * Cursor appends the model_call_id after a "\nmc_" delimiter; when the
 * delimiter is absent, modelCallId is null.
 *
 * @param {string} id - combined id, e.g. "call_1\nmc_abc"
 * @returns {{ toolCallId: string, modelCallId: string|null }}
 */
function parseToolId(id) {
  const SEP = "\nmc_";
  const at = id.indexOf(SEP);
  if (at < 0) {
    return { toolCallId: id, modelCallId: null };
  }
  const toolCallId = id.substring(0, at);
  const modelCallId = id.substring(at + SEP.length);
  return { toolCallId, modelCallId };
}
/**
 * Encode an MCPResult proto message: { selected_tool, result }.
 *
 * @param {string} selectedTool - raw tool name the model selected
 * @param {string} resultContent - serialized tool output
 * @returns {Uint8Array} encoded MCPResult bytes
 */
function encodeMcpResult(selectedTool, resultContent) {
  return concatArrays(
    encodeField(FIELD.MCPR_SELECTED_TOOL, WIRE_TYPE.LEN, selectedTool),
    encodeField(FIELD.MCPR_RESULT, WIRE_TYPE.LEN, resultContent)
  );
}
/**
 * Encode a ClientSideToolV2Result proto: tool enum (MCP), nested MCP
 * result, call id, optional model call id, and a fixed tool_index of 1.
 * (A stale duplicate encodeMessage signature left above this helper was
 * removed; the current encodeMessage is defined later in this file.)
 */
function encodeClientSideToolV2Result(toolCallId, modelCallId, selectedTool, resultContent) {
  const parts = [
    encodeField(FIELD.CV2R_TOOL, WIRE_TYPE.VARINT, CLIENT_SIDE_TOOL_V2_MCP),
    encodeField(FIELD.CV2R_MCP_RESULT, WIRE_TYPE.LEN, encodeMcpResult(selectedTool, resultContent)),
    encodeField(FIELD.CV2R_CALL_ID, WIRE_TYPE.LEN, toolCallId)
  ];
  if (modelCallId) {
    parts.push(encodeField(FIELD.CV2R_MODEL_CALL_ID, WIRE_TYPE.LEN, modelCallId));
  }
  parts.push(encodeField(FIELD.CV2R_TOOL_INDEX, WIRE_TYPE.VARINT, 1));
  return concatArrays(...parts);
}
/**
 * Encode a single MCPParams.Tool entry (name, params, server) and wrap
 * it in the repeated tools-list field nested inside ClientSideToolV2Call.
 */
function encodeMcpParamsForCall(toolName, rawArgs, serverName) {
  return encodeField(
    FIELD.MCP_TOOLS_LIST,
    WIRE_TYPE.LEN,
    concatArrays(
      encodeField(FIELD.MCP_TOOL_NAME, WIRE_TYPE.LEN, toolName),
      encodeField(FIELD.MCP_TOOL_PARAMS, WIRE_TYPE.LEN, rawArgs),
      encodeField(FIELD.MCP_TOOL_SERVER, WIRE_TYPE.LEN, serverName)
    )
  );
}
/**
 * Encode a ClientSideToolV2Call proto describing the original MCP tool
 * invocation: tool enum, MCP params, call id, display name, raw args,
 * fixed tool_index of 1, and an optional trailing model call id.
 */
function encodeClientSideToolV2Call(toolCallId, toolName, mcpToolName, rawArgs, modelCallId) {
  const parts = [
    encodeField(FIELD.CV2C_TOOL, WIRE_TYPE.VARINT, CLIENT_SIDE_TOOL_V2_MCP),
    encodeField(FIELD.CV2C_MCP_PARAMS, WIRE_TYPE.LEN, encodeMcpParamsForCall(mcpToolName, rawArgs, "custom")),
    encodeField(FIELD.CV2C_CALL_ID, WIRE_TYPE.LEN, toolCallId),
    encodeField(FIELD.CV2C_NAME, WIRE_TYPE.LEN, toolName),
    encodeField(FIELD.CV2C_RAW_ARGS, WIRE_TYPE.LEN, rawArgs),
    encodeField(FIELD.CV2C_TOOL_INDEX, WIRE_TYPE.VARINT, 1)
  ];
  if (modelCallId) {
    parts.push(encodeField(FIELD.CV2C_MODEL_CALL_ID, WIRE_TYPE.LEN, modelCallId));
  }
  return concatArrays(...parts);
}
/**
 * Encode ConversationMessage.ToolResult with the full Cursor proto
 * structure: tool_call_id, tool_name, tool_index, raw_args, the nested
 * result (ClientSideToolV2Result), and the original call
 * (ClientSideToolV2Call).
 */
export function encodeToolResult(toolResult) {
  const originalName = toolResult.tool_name || toolResult.name || "";
  // Cursor expects the namespaced name, e.g. "Write" -> "mcp_custom_Write"
  const toolName = formatToolName(originalName);
  const rawArgs = toolResult.raw_args || "{}";
  const resultContent = toolResult.result_content || "";
  const { toolCallId, modelCallId } = parseToolId(toolResult.tool_call_id || "");
  // Derive mcpToolName by stripping "mcp_" -> "custom_toolName"
  const mcpToolName = toolName.startsWith("mcp_") ? toolName.slice(4) : originalName;
  const parts = [
    encodeField(FIELD.TOOL_RESULT_CALL_ID, WIRE_TYPE.LEN, toolCallId),
    encodeField(FIELD.TOOL_RESULT_NAME, WIRE_TYPE.LEN, toolName),
    encodeField(FIELD.TOOL_RESULT_INDEX, WIRE_TYPE.VARINT, toolResult.tool_index || 1)
  ];
  if (modelCallId) {
    parts.push(encodeField(FIELD.TOOL_RESULT_MODEL_CALL_ID, WIRE_TYPE.LEN, modelCallId));
  }
  parts.push(
    encodeField(FIELD.TOOL_RESULT_RAW_ARGS, WIRE_TYPE.LEN, rawArgs),
    encodeField(
      FIELD.TOOL_RESULT_RESULT,
      WIRE_TYPE.LEN,
      encodeClientSideToolV2Result(toolCallId, modelCallId, mcpToolName, resultContent)
    ),
    encodeField(
      FIELD.TOOL_RESULT_TOOL_CALL,
      WIRE_TYPE.LEN,
      encodeClientSideToolV2Call(toolCallId, toolName, mcpToolName, rawArgs, modelCallId)
    )
  );
  return concatArrays(...parts);
}
export function encodeMessage(content, role, messageId, chatModeEnum = null, isLast = false, hasTools = false, toolResults = [], serverBubbleId = null) {
const hasToolResults = toolResults.length > 0;
return concatArrays(
encodeField(FIELD.MSG_CONTENT, WIRE_TYPE.LEN, content),
encodeField(FIELD.MSG_ROLE, WIRE_TYPE.VARINT, role),
encodeField(FIELD.MSG_ID, WIRE_TYPE.LEN, messageId),
...(toolResults.length > 0 ? toolResults.map(tr =>
// Only include server_bubble_id if explicitly provided (last assistant message only)
...(serverBubbleId ? [encodeField(FIELD.MSG_SERVER_BUBBLE_ID, WIRE_TYPE.LEN, serverBubbleId)] : []),
...(hasToolResults ? toolResults.map(tr =>
encodeField(FIELD.MSG_TOOL_RESULTS, WIRE_TYPE.LEN, encodeToolResult(tr))
) : []),
encodeField(FIELD.MSG_IS_AGENTIC, WIRE_TYPE.VARINT, hasTools ? 1 : 0),
@@ -344,6 +461,45 @@ export function buildChatRequest(messages, modelName, tools = [], reasoningEffor
return encodeField(FIELD.REQUEST, WIRE_TYPE.LEN, encodeRequest(messages, modelName, tools, reasoningEffort));
}
/**
 * Encode a tool result as ClientSideToolV2Result on field 2 of
 * StreamUnifiedChatRequestWithTools. This is sent as a SEPARATE request
 * frame, not embedded inside conversation messages.
 */
export function buildToolResultRequest(toolResult) {
  const { toolCallId, modelCallId } = parseToolId(toolResult.tool_call_id || "");
  const rawName = toolResult.tool_name || "";
  const resultContent = toolResult.result_content || "";
  // selected_tool must be the bare tool name (e.g. "Write"), i.e. the
  // name with any "mcp_custom_" / "mcp_" namespace prefix removed —
  // matching cursor-api's McpResult { selected_tool, result } encoding.
  let selectedTool = rawName;
  if (rawName.startsWith("mcp_custom_")) {
    selectedTool = rawName.slice("mcp_custom_".length);
  } else if (rawName.startsWith("mcp_")) {
    selectedTool = rawName.slice(4);
  }
  // ClientSideToolV2Result layout:
  //   field 1  (tool):          varint 19 (MCP)
  //   field 28 (mcp_result):    LEN { selected_tool, result }
  //   field 35 (tool_call_id):  string
  //   field 48 (model_call_id): optional string
  // tool_index is intentionally omitted (None in the Rust reference).
  const parts = [
    encodeField(FIELD.CV2R_TOOL, WIRE_TYPE.VARINT, CLIENT_SIDE_TOOL_V2_MCP),
    encodeField(FIELD.CV2R_MCP_RESULT, WIRE_TYPE.LEN, encodeMcpResult(selectedTool, resultContent)),
    encodeField(FIELD.CV2R_CALL_ID, WIRE_TYPE.LEN, toolCallId)
  ];
  if (modelCallId) {
    parts.push(encodeField(FIELD.CV2R_MODEL_CALL_ID, WIRE_TYPE.LEN, modelCallId));
  }
  // StreamUnifiedChatRequestWithTools: field 2 = client_side_tool_v2_result
  return encodeField(2, WIRE_TYPE.LEN, concatArrays(...parts));
}
export function wrapConnectRPCFrame(payload, compress = false) {
let finalPayload = payload;
let flags = 0x00;
@@ -374,6 +530,15 @@ export function generateCursorBody(messages, modelName, tools = [], reasoningEff
return framed;
}
/**
 * Build a fully framed (ConnectRPC, uncompressed) tool-result body ready
 * to be sent as its own request frame, using field 2
 * (client_side_tool_v2_result) of StreamUnifiedChatRequestWithTools.
 */
export function generateToolResultBody(toolResult) {
  return wrapConnectRPCFrame(buildToolResultRequest(toolResult), false);
}
// ==================== PRIMITIVE DECODING ====================
export function decodeVarint(buffer, offset) {

View File

@@ -20,7 +20,7 @@ async function ensureNodeModules() {
}
}
// Format timestamp for folder name: 20251228_143045_123
function formatTimestamp(date = new Date()) {
const pad = (n) => String(n).padStart(2, "0");
const y = date.getFullYear();
@@ -29,7 +29,8 @@ function formatTimestamp(date = new Date()) {
const h = pad(date.getHours());
const min = pad(date.getMinutes());
const s = pad(date.getSeconds());
return `${y}${m}${d}_${h}${min}${s}`;
const ms = String(date.getMilliseconds()).padStart(3, "0");
return `${y}${m}${d}_${h}${min}${s}_${ms}`;
}
// Create log session folder: {sourceFormat}_{targetFormat}_{model}_{timestamp}

View File

@@ -1,6 +1,6 @@
{
"name": "9router-app",
"version": "0.2.92",
"description": "9Router web dashboard",
"private": true,
"scripts": {

View File

@@ -130,8 +130,8 @@ export const CLI_TOOLS = {
configType: "mitm",
modelAliases: ["claude-opus-4-6-thinking", "claude-sonnet-4-6", "gemini-3-flash", "gpt-oss-120b-medium", "gemini-3-pro-high", "gemini-3-pro-low"],
defaultModels: [
{ id: "gemini-3.1-pro-high", name: "Gemini 3.1 Pro High", alias: "gemini-3.1-pro-high" },
{ id: "gemini-3.1-pro-low", name: "Gemini 3.1 Pro Low", alias: "gemini-3.1-pro-low" },
{ id: "gemini-3-flash", name: "Gemini 3 Flash", alias: "gemini-3-flash" },
{ id: "claude-sonnet-4-6", name: "Claude Sonnet 4.6", alias: "claude-sonnet-4-6" },
{ id: "claude-opus-4-6-thinking", name: "Claude Opus 4.6 Thinking", alias: "claude-opus-4-6-thinking" },

View File

@@ -307,14 +307,14 @@ export const DEFAULT_PRICING = {
// Antigravity (ag) - User-provided pricing
ag: {
"gemini-3.1-pro-low": {
input: 2.00,
output: 12.00,
cached: 0.25,
reasoning: 18.00,
cache_creation: 2.00
},
"gemini-3.1-pro-high": {
input: 4.00,
output: 18.00,
cached: 0.50,