feat: Improve Antigravity quota monitoring and fix Droid CLI compatibility

This commit is contained in:
decolua
2026-02-05 22:15:26 +07:00
parent 32aefe5a76
commit 3c65e0c5f2
6 changed files with 41 additions and 31 deletions

View File

@@ -37,6 +37,7 @@ export class CodexExecutor extends BaseExecutor {
delete body.prompt_cache_retention; // Cursor sends this but Codex doesn't support it
delete body.metadata; // Cursor sends this but Codex doesn't support it
delete body.stream_options; // Cursor sends this but Codex doesn't support it
delete body.safety_identifier; // Droid CLI sends this but Codex doesn't support it
return body;
}

View File

@@ -470,7 +470,8 @@ export async function handleChatCore({ body, modelInfo, credentials, log, onCred
let transformStream;
// For Codex provider, always translate response from openai-responses to openai format
// This ensures clients like Cursor get the expected chat completions format
const needsCodexTranslation = (provider === 'codex' || provider === 'openai') && targetFormat === 'openai-responses';
// BUT: skip translation if client already sent in openai-responses format (like Droid CLI)
const needsCodexTranslation = (provider === 'codex' || provider === 'openai') && targetFormat === 'openai-responses' && sourceFormat !== 'openai-responses';
if (needsCodexTranslation || needsTranslation(targetFormat, sourceFormat)) {
// For Codex, translate FROM openai-responses TO openai (client's expected format)
const responseSourceFormat = needsCodexTranslation ? 'openai-responses' : targetFormat;

View File

@@ -23,7 +23,7 @@ import { createResponsesApiTransformStream } from "../transformer/responsesTrans
export async function handleResponsesCore({ body, modelInfo, credentials, log, onCredentialsRefreshed, onRequestSuccess, onDisconnect, connectionId }) {
// Convert Responses API format to Chat Completions format
const convertedBody = convertResponsesApiFormat(body);
// Ensure stream is enabled
convertedBody.stream = true;

View File

@@ -20,7 +20,11 @@ export function convertResponsesApiFormat(body) {
let pendingToolResults = [];
for (const item of body.input) {
if (item.type === "message") {
// Determine item type - Droid may send items without 'type' field
// If no type but has role, treat as message
const itemType = item.type || (item.role ? "message" : null);
if (itemType === "message") {
// Flush any pending assistant message with tool calls
if (currentAssistantMsg) {
result.messages.push(currentAssistantMsg);
@@ -37,14 +41,14 @@ export function convertResponsesApiFormat(body) {
// Convert content: input_text → text, output_text → text
const content = Array.isArray(item.content)
? item.content.map(c => {
if (c.type === "input_text") return { type: "text", text: c.text };
if (c.type === "output_text") return { type: "text", text: c.text };
return c;
})
if (c.type === "input_text") return { type: "text", text: c.text };
if (c.type === "output_text") return { type: "text", text: c.text };
return c;
})
: item.content;
result.messages.push({ role: item.role, content });
}
else if (item.type === "function_call") {
}
else if (itemType === "function_call") {
// Start or append to assistant message with tool_calls
if (!currentAssistantMsg) {
currentAssistantMsg = {
@@ -62,7 +66,7 @@ export function convertResponsesApiFormat(body) {
}
});
}
else if (item.type === "function_call_output") {
else if (itemType === "function_call_output") {
// Flush assistant message first if exists
if (currentAssistantMsg) {
result.messages.push(currentAssistantMsg);
@@ -75,7 +79,7 @@ export function convertResponsesApiFormat(body) {
content: typeof item.output === "string" ? item.output : JSON.stringify(item.output)
});
}
else if (item.type === "reasoning") {
else if (itemType === "reasoning") {
// Skip reasoning items - they are for display only
continue;
}

View File

@@ -26,7 +26,11 @@ export function openaiResponsesToOpenAIRequest(model, body, stream, credentials)
let pendingToolResults = [];
for (const item of body.input) {
if (item.type === "message") {
// Determine item type - Droid may send items without 'type' field
// If no type but has role, treat as message
const itemType = item.type || (item.role ? "message" : null);
if (itemType === "message") {
// Flush any pending assistant message with tool calls
if (currentAssistantMsg) {
result.messages.push(currentAssistantMsg);
@@ -43,14 +47,14 @@ export function openaiResponsesToOpenAIRequest(model, body, stream, credentials)
// Convert content: input_text → text, output_text → text
const content = Array.isArray(item.content)
? item.content.map(c => {
if (c.type === "input_text") return { type: "text", text: c.text };
if (c.type === "output_text") return { type: "text", text: c.text };
return c;
})
if (c.type === "input_text") return { type: "text", text: c.text };
if (c.type === "output_text") return { type: "text", text: c.text };
return c;
})
: item.content;
result.messages.push({ role: item.role, content });
}
else if (item.type === "function_call") {
}
else if (itemType === "function_call") {
// Start or append to assistant message with tool_calls
if (!currentAssistantMsg) {
currentAssistantMsg = {
@@ -68,7 +72,7 @@ export function openaiResponsesToOpenAIRequest(model, body, stream, credentials)
}
});
}
else if (item.type === "function_call_output") {
else if (itemType === "function_call_output") {
// Flush assistant message first if exists
if (currentAssistantMsg) {
result.messages.push(currentAssistantMsg);
@@ -88,7 +92,7 @@ export function openaiResponsesToOpenAIRequest(model, body, stream, credentials)
content: typeof item.output === "string" ? item.output : JSON.stringify(item.output)
});
}
else if (item.type === "reasoning") {
else if (itemType === "reasoning") {
// Skip reasoning items - they are for display only
continue;
}
@@ -159,14 +163,14 @@ export function openaiToOpenAIResponsesRequest(model, body, stream, credentials)
// Convert user/assistant messages to input items
if (msg.role === "user" || msg.role === "assistant") {
const contentType = msg.role === "user" ? "input_text" : "output_text";
const content = typeof msg.content === "string"
const content = typeof msg.content === "string"
? [{ type: contentType, text: msg.content }]
: Array.isArray(msg.content)
? msg.content.map(c => {
if (c.type === "text") return { type: contentType, text: c.text };
if (c.type === "image_url") return { type: contentType, text: "[Image content]" };
return c;
})
if (c.type === "text") return { type: contentType, text: c.text };
if (c.type === "image_url") return { type: contentType, text: "[Image content]" };
return c;
})
: [];
result.input.push({

View File

@@ -116,7 +116,7 @@ function openaiToGeminiBase(model, body, stream) {
// Check if there are actual tool responses in the next messages
const hasActualResponses = toolCallIds.some(fid => toolResponses[fid]);
if (hasActualResponses) {
const toolParts = [];
for (const fid of toolCallIds) {
@@ -305,7 +305,7 @@ function wrapInCloudCodeEnvelopeForClaude(model, claudeRequest, credentials = nu
if (claudeRequest.messages && Array.isArray(claudeRequest.messages)) {
for (const msg of claudeRequest.messages) {
const parts = [];
if (Array.isArray(msg.content)) {
for (const block of msg.content) {
if (block.type === "text") {
@@ -369,7 +369,7 @@ function wrapInCloudCodeEnvelopeForClaude(model, claudeRequest, credentials = nu
// Add system instruction (Antigravity default)
const defaultPart = { text: ANTIGRAVITY_DEFAULT_SYSTEM };
const systemParts = [defaultPart];
if (claudeRequest.system) {
if (Array.isArray(claudeRequest.system)) {
for (const block of claudeRequest.system) {
@@ -379,7 +379,7 @@ function wrapInCloudCodeEnvelopeForClaude(model, claudeRequest, credentials = nu
systemParts.push({ text: claudeRequest.system });
}
}
envelope.request.systemInstruction = { role: "user", parts: systemParts };
return envelope;
@@ -388,12 +388,12 @@ function wrapInCloudCodeEnvelopeForClaude(model, claudeRequest, credentials = nu
// OpenAI -> Antigravity (Sandbox Cloud Code with wrapper)
//
// Dispatches on the model name: Claude models are converted through the
// Claude-specific request builder and wrapped in the Claude Cloud Code
// envelope; all other models go through the Gemini CLI request builder
// and the generic envelope.
export function openaiToAntigravityRequest(model, body, stream, credentials = null) {
  if (model.toLowerCase().includes("claude")) {
    return wrapInCloudCodeEnvelopeForClaude(
      model,
      openaiToClaudeRequestForAntigravity(model, body, stream),
      credentials
    );
  }
  return wrapInCloudCodeEnvelope(
    model,
    openaiToGeminiCLIRequest(model, body, stream),
    credentials,
    true
  );
}