// 9router/open-sse/handlers/chatCore.js
import { detectFormat, getTargetFormat } from "../services/provider.js";
import { translateRequest, needsTranslation } from "../translator/index.js";
import { FORMATS } from "../translator/formats.js";
import { createSSETransformStreamWithLogger, createPassthroughStreamWithLogger, COLORS } from "../utils/stream.js";
import { createStreamController, pipeWithDisconnect } from "../utils/streamHandler.js";
import { refreshWithRetry } from "../services/tokenRefresh.js";
import { createRequestLogger } from "../utils/requestLogger.js";
import { getModelTargetFormat, PROVIDER_ID_TO_ALIAS } from "../config/providerModels.js";
import { createErrorResult, parseUpstreamError, formatProviderError } from "../utils/error.js";
import { handleBypassRequest } from "../utils/bypassHandler.js";
import { saveRequestUsage, trackPendingRequest, appendRequestLog } from "@/lib/usageDb.js";
import { getExecutor } from "../executors/index.js";

/**
 * Extract usage from a non-streaming response body.
 * Handles the different provider response formats.
 */
function extractUsageFromResponse(responseBody, provider) {
  if (!responseBody) return null;

  // Claude format (checked first: Claude responses also carry a `usage` object,
  // so the generic OpenAI check below would otherwise shadow this branch)
  if (responseBody.usage?.input_tokens !== undefined || responseBody.usage?.output_tokens !== undefined) {
    return {
      prompt_tokens: responseBody.usage.input_tokens || 0,
      completion_tokens: responseBody.usage.output_tokens || 0,
      cache_read_input_tokens: responseBody.usage.cache_read_input_tokens,
      cache_creation_input_tokens: responseBody.usage.cache_creation_input_tokens
    };
  }

  // OpenAI format
  if (responseBody.usage) {
    return {
      prompt_tokens: responseBody.usage.prompt_tokens || 0,
      completion_tokens: responseBody.usage.completion_tokens || 0,
      cached_tokens: responseBody.usage.prompt_tokens_details?.cached_tokens,
      reasoning_tokens: responseBody.usage.completion_tokens_details?.reasoning_tokens
    };
  }

  // Gemini format
  if (responseBody.usageMetadata) {
    return {
      prompt_tokens: responseBody.usageMetadata.promptTokenCount || 0,
      completion_tokens: responseBody.usageMetadata.candidatesTokenCount || 0,
      reasoning_tokens: responseBody.usageMetadata.thoughtsTokenCount
    };
  }

  return null;
}
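
// Illustrative only (not referenced by the module): the three raw usage shapes
// this normalizer accepts. The token counts are made-up sample values, and the
// `provider` argument is omitted because the current implementation does not
// consult it.
//
//   extractUsageFromResponse({ usage: { prompt_tokens: 120, completion_tokens: 45 } });
//   // -> { prompt_tokens: 120, completion_tokens: 45, cached_tokens: undefined, reasoning_tokens: undefined }
//
//   extractUsageFromResponse({ usage: { input_tokens: 120, output_tokens: 45, cache_read_input_tokens: 80 } });
//   // -> { prompt_tokens: 120, completion_tokens: 45, cache_read_input_tokens: 80, cache_creation_input_tokens: undefined }
//
//   extractUsageFromResponse({ usageMetadata: { promptTokenCount: 120, candidatesTokenCount: 45 } });
//   // -> { prompt_tokens: 120, completion_tokens: 45, reasoning_tokens: undefined }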

/**
 * Core chat handler - shared between SSE and Worker.
 * Returns { success, response, status, error } for the caller to handle fallback.
 * @param {object} options
 * @param {object} options.body - Request body
 * @param {object} options.modelInfo - { provider, model }
 * @param {object} options.credentials - Provider credentials
 * @param {object} options.log - Logger instance (optional)
 * @param {function} options.onCredentialsRefreshed - Callback when credentials are refreshed
 * @param {function} options.onRequestSuccess - Callback when request succeeds (to clear error status)
 * @param {function} options.onDisconnect - Callback when the client disconnects
 * @param {object} options.clientRawRequest - Raw client request ({ endpoint, body, headers }) for logging
 * @param {string} options.connectionId - Connection ID for usage tracking
 * @param {string} options.userAgent - Client User-Agent, passed to bypass detection
 */
export async function handleChatCore({ body, modelInfo, credentials, log, onCredentialsRefreshed, onRequestSuccess, onDisconnect, clientRawRequest, connectionId, userAgent }) {
  const { provider, model } = modelInfo;
  const sourceFormat = detectFormat(body);

  // Check for bypass patterns (warmup, skip) - return fake response
  const bypassResponse = handleBypassRequest(body, model, userAgent);
  if (bypassResponse) {
    return bypassResponse;
  }

  // Get target format - model-specific targetFormat takes priority over provider default
  const alias = PROVIDER_ID_TO_ALIAS[provider] || provider;
  const modelTargetFormat = getModelTargetFormat(alias, model);
  const targetFormat = modelTargetFormat || getTargetFormat(provider);
  const stream = body.stream !== false;

  // Create request logger for this session: sourceFormat_targetFormat_model
  const reqLogger = await createRequestLogger(sourceFormat, targetFormat, model);

  // 0. Log client raw request (before any conversion)
  if (clientRawRequest) {
    reqLogger.logClientRawRequest(
      clientRawRequest.endpoint,
      clientRawRequest.body,
      clientRawRequest.headers
    );
  }

  // 1. Log raw request from client
  reqLogger.logRawRequest(body);

  // 1a. Log format detection info
  reqLogger.logFormatInfo({
    sourceFormat,
    targetFormat,
    provider,
    model,
    stream
  });
log?.debug?.("FORMAT", `${sourceFormat}${targetFormat} | stream=${stream}`);
// Translate request
let translatedBody = body;
translatedBody = translateRequest(sourceFormat, targetFormat, model, body, stream, credentials, provider);
// Extract toolNameMap for response translation (Claude OAuth)
const toolNameMap = translatedBody._toolNameMap;
delete translatedBody._toolNameMap;
// Update model in body
translatedBody.model = model;
  // Get executor for this provider
  const executor = getExecutor(provider);

  // Track pending request
  trackPendingRequest(model, provider, connectionId, true);

  // Log start
  appendRequestLog({ model, provider, connectionId, status: "PENDING" }).catch(() => {});

  const msgCount = translatedBody.messages?.length
    || translatedBody.contents?.length
    || translatedBody.request?.contents?.length
    || 0;
  log?.debug?.("REQUEST", `${provider.toUpperCase()} | ${model} | ${msgCount} msgs`);

  // Create stream controller for disconnect detection
  const streamController = createStreamController({ onDisconnect, log, provider, model });

  // Execute request using executor (handles URL building, headers, fallback, transform)
  let providerResponse;
  let providerUrl;
  let providerHeaders;
  let finalBody;
  try {
    const result = await executor.execute({
      model,
      body: translatedBody,
      stream,
      credentials,
      signal: streamController.signal,
      log
    });
    providerResponse = result.response;
    providerUrl = result.url;
    providerHeaders = result.headers;
    finalBody = result.transformedBody;

    // Log converted request
    reqLogger.logConvertedRequest(providerUrl, providerHeaders, finalBody);
  } catch (error) {
    trackPendingRequest(model, provider, connectionId, false);
    appendRequestLog({ model, provider, connectionId, status: `FAILED ${error.name === "AbortError" ? 499 : 502}` }).catch(() => {});
    if (error.name === "AbortError") {
      streamController.handleError(error);
      return createErrorResult(499, "Request aborted");
    }
    const errMsg = formatProviderError(error, provider, model, 502);
    console.log(`${COLORS.red}[ERROR] ${errMsg}${COLORS.reset}`);
    return createErrorResult(502, errMsg);
  }
  // Handle 401/403 - try token refresh using executor
  if (providerResponse.status === 401 || providerResponse.status === 403) {
    const newCredentials = await refreshWithRetry(
      () => executor.refreshCredentials(credentials, log),
      3,
      log
    );
    if (newCredentials?.accessToken || newCredentials?.copilotToken) {
      log?.info?.("TOKEN", `${provider.toUpperCase()} | refreshed`);
      // Update credentials
      Object.assign(credentials, newCredentials);
      // Notify caller about refreshed credentials
      if (onCredentialsRefreshed) {
        await onCredentialsRefreshed(newCredentials);
      }
      // Retry with new credentials
      try {
        const retryResult = await executor.execute({
          model,
          body: translatedBody,
          stream,
          credentials,
          signal: streamController.signal,
          log
        });
        if (retryResult.response.ok) {
          providerResponse = retryResult.response;
          providerUrl = retryResult.url;
        }
      } catch (retryError) {
        log?.warn?.("TOKEN", `${provider.toUpperCase()} | retry after refresh failed`);
      }
    } else {
      log?.warn?.("TOKEN", `${provider.toUpperCase()} | refresh failed`);
    }
  }
  // Check provider response - return error info for fallback handling
  if (!providerResponse.ok) {
    trackPendingRequest(model, provider, connectionId, false);
    const { statusCode, message, retryAfterMs } = await parseUpstreamError(providerResponse, provider);
    appendRequestLog({ model, provider, connectionId, status: `FAILED ${statusCode}` }).catch(() => {});
    const errMsg = formatProviderError(new Error(message), provider, model, statusCode);
    console.log(`${COLORS.red}[ERROR] ${errMsg}${COLORS.reset}`);

    // Log Antigravity retry time if available
    if (retryAfterMs && provider === "antigravity") {
      const retrySeconds = Math.ceil(retryAfterMs / 1000);
      log?.debug?.("RETRY", `Antigravity quota reset in ${retrySeconds}s (${retryAfterMs}ms)`);
    }

    // Log error with full request body for debugging
    reqLogger.logError(new Error(message), finalBody || translatedBody);
    return createErrorResult(statusCode, errMsg, retryAfterMs);
  }
  // Non-streaming response
  if (!stream) {
    trackPendingRequest(model, provider, connectionId, false);
    const responseBody = await providerResponse.json();

    // Notify success - caller can clear error status if needed
    if (onRequestSuccess) {
      await onRequestSuccess();
    }

    // Log usage for non-streaming responses
    const usage = extractUsageFromResponse(responseBody, provider);
    appendRequestLog({ model, provider, connectionId, tokens: usage, status: "200 OK" }).catch(() => {});
    if (usage) {
      const msg = `[${new Date().toLocaleTimeString("en-US", { hour12: false, hour: "2-digit", minute: "2-digit" })}] 📊 [USAGE] ${provider.toUpperCase()} | in=${usage.prompt_tokens || 0} | out=${usage.completion_tokens || 0}${connectionId ? ` | account=${connectionId.slice(0, 8)}...` : ""}`;
      console.log(`${COLORS.green}${msg}${COLORS.reset}`);
      saveRequestUsage({
        provider: provider || "unknown",
        model: model || "unknown",
        tokens: usage,
        timestamp: new Date().toISOString(),
        connectionId: connectionId || undefined
      }).catch(err => {
        console.error("Failed to save usage stats:", err.message);
      });
    }

    return {
      success: true,
      response: new Response(JSON.stringify(responseBody), {
        headers: {
          "Content-Type": "application/json",
          "Access-Control-Allow-Origin": "*"
        }
      })
    };
  }
  // Streaming response
  // Notify success - caller can clear error status if needed
  if (onRequestSuccess) {
    await onRequestSuccess();
  }

  const responseHeaders = {
    "Content-Type": "text/event-stream",
    "Cache-Control": "no-cache",
    "Connection": "keep-alive",
    "Access-Control-Allow-Origin": "*"
  };

  // Create transform stream with logger for streaming response
  let transformStream;
  if (needsTranslation(targetFormat, sourceFormat)) {
    transformStream = createSSETransformStreamWithLogger(targetFormat, sourceFormat, provider, reqLogger, toolNameMap, model, connectionId);
  } else {
    transformStream = createPassthroughStreamWithLogger(provider, reqLogger, model, connectionId);
  }

  // Pipe response through transform with disconnect detection
  const transformedBody = pipeWithDisconnect(providerResponse, transformStream, streamController);
  return {
    success: true,
    response: new Response(transformedBody, {
      headers: responseHeaders
    })
  };
}
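
// Minimal caller sketch (illustrative only, not part of this module). The helpers
// `resolveModel` and `loadCredentials` and the connection id are hypothetical;
// only the handleChatCore contract above is taken from this file.
//
//   const modelInfo = resolveModel(body.model);              // e.g. { provider, model }
//   const credentials = await loadCredentials(modelInfo.provider);
//   const result = await handleChatCore({
//     body,
//     modelInfo,
//     credentials,
//     log: console,
//     connectionId: "conn-1234",
//     onDisconnect: () => console.debug("client disconnected")
//   });
//   if (result.success) {
//     return result.response;                                 // fetch Response (JSON or SSE stream)
//   }
//   // On failure, the caller can fall back to another provider using result.status
//   // and result.error (and any retry delay reported by the upstream error).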

/**
 * Check if a token is expired or about to expire.
 */
export function isTokenExpiringSoon(expiresAt, bufferMs = 5 * 60 * 1000) {
  if (!expiresAt) return false;
  const expiresAtMs = new Date(expiresAt).getTime();
  return expiresAtMs - Date.now() < bufferMs;
}
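
// Usage sketch (illustrative): refresh ahead of expiry before issuing a request.
// `credentials.expiresAt` and `refreshCredentials` are assumptions for the example;
// only isTokenExpiringSoon's contract comes from this file.
//
//   if (isTokenExpiringSoon(credentials.expiresAt)) {        // default buffer: 5 minutes
//     Object.assign(credentials, await refreshCredentials(credentials));
//   }
//   // isTokenExpiringSoon("2099-01-01T00:00:00Z") === false
//   // isTokenExpiringSoon(Date.now() - 1000) === true       (already expired counts as expiring)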