feat: cherry-pick PR #183 — multi-provider support, PWA, dynamic models, UI improvements

Cherry-picked from decolua/9router PR #183.
Note: open-sse changes included but need further review due to extensive modifications.

Co-authored-by: Cursor <cursoragent@cursor.com>
This commit is contained in:
Quan
2026-02-25 11:40:50 +07:00
committed by decolua
parent 147fc168f9
commit 07717bad60
30 changed files with 773 additions and 159 deletions

3
.gitignore vendored
View File

@@ -65,3 +65,6 @@ package-lock.json
#Ignore vscode AI rules
.github/instructions/codacy.instructions.md
README1.md
deploy.sh
ecosystem.config.*
start.sh

View File

@@ -4,9 +4,7 @@ const nextConfig = {
images: {
unoptimized: true
},
env: {
NEXT_PUBLIC_CLOUD_URL: "https://9router.com",
},
env: {},
webpack: (config, { isServer }) => {
// Ignore fs/path modules in browser bundle
if (!isServer) {

View File

@@ -1,5 +1,13 @@
import { platform, arch } from "os";
// === GitHub Copilot Version Constants ===
// Single source of truth for the editor/plugin versions advertised to the
// GitHub Copilot API. Referenced when building request headers (editor-version,
// editor-plugin-version, user-agent, x-github-api-version) so all call sites
// stay in sync when a version bump is needed.
export const GITHUB_COPILOT = {
VSCODE_VERSION: "1.110.0",
COPILOT_CHAT_VERSION: "0.38.0",
USER_AGENT: "GitHubCopilotChat/0.38.0",
API_VERSION: "2025-04-01",
};
// === Antigravity Binary Alignment: Numeric Enums ===
// Reference: Antigravity binary analysis - google.internal.cloud.code.v1internal.ClientMetadata
@@ -53,7 +61,7 @@ export function getPlatformEnum() {
export function getPlatformUserAgent() {
const os = platform();
const architecture = arch();
return `antigravity/1.16.5 ${os}/${architecture}`;
return `antigravity/1.104.0 ${os}/${architecture}`;
}
// Centralized client metadata (used in request bodies for loadCodeAssist, onboardUser, etc.)
@@ -71,7 +79,8 @@ export const INTERNAL_REQUEST_HEADER = { name: "x-request-source", value: "local
export const ANTIGRAVITY_HEADERS = {
"X-Client-Name": "antigravity",
"X-Client-Version": "1.107.0",
"x-goog-api-client": "gl-node/18.18.2 fire/0.8.6 grpc/1.10.x"
"x-goog-api-client": "gl-node/18.18.2 fire/0.8.6 grpc/1.10.x",
"User-Agent": "antigravity/1.107.0 darwin/arm64"
};
// Cloud Code Assist API endpoints (for Project ID discovery)
@@ -220,11 +229,11 @@ export const PROVIDERS = {
format: "openai", // GitHub Copilot uses OpenAI-compatible format
headers: {
"copilot-integration-id": "vscode-chat",
"editor-version": "vscode/1.107.1",
"editor-plugin-version": "copilot-chat/0.26.7",
"user-agent": "GitHubCopilotChat/0.26.7",
"editor-version": `vscode/${GITHUB_COPILOT.VSCODE_VERSION}`,
"editor-plugin-version": `copilot-chat/${GITHUB_COPILOT.COPILOT_CHAT_VERSION}`,
"user-agent": GITHUB_COPILOT.USER_AGENT,
"openai-intent": "conversation-panel",
"x-github-api-version": "2025-04-01",
"x-github-api-version": GITHUB_COPILOT.API_VERSION,
"x-vscode-user-agent-library-version": "electron-fetch",
"X-Initiator": "user",
"Accept": "application/json",
@@ -283,6 +292,82 @@ export const PROVIDERS = {
},
tokenUrl: "https://api.cline.bot/api/v1/auth/token",
refreshUrl: "https://api.cline.bot/api/v1/auth/refresh"
},
nvidia: {
baseUrl: "https://integrate.api.nvidia.com/v1/chat/completions",
format: "openai"
},
anthropic: {
baseUrl: "https://api.anthropic.com/v1/messages",
format: "claude",
headers: {
"Anthropic-Version": "2023-06-01",
"Anthropic-Beta": "claude-code-20250219,interleaved-thinking-2025-05-14"
}
},
deepseek: {
baseUrl: "https://api.deepseek.com/chat/completions",
format: "openai"
},
groq: {
baseUrl: "https://api.groq.com/openai/v1/chat/completions",
format: "openai"
},
xai: {
baseUrl: "https://api.x.ai/v1/chat/completions",
format: "openai"
},
mistral: {
baseUrl: "https://api.mistral.ai/v1/chat/completions",
format: "openai"
},
perplexity: {
baseUrl: "https://api.perplexity.ai/chat/completions",
format: "openai"
},
together: {
baseUrl: "https://api.together.xyz/v1/chat/completions",
format: "openai"
},
fireworks: {
baseUrl: "https://api.fireworks.ai/inference/v1/chat/completions",
format: "openai"
},
cerebras: {
baseUrl: "https://api.cerebras.ai/v1/chat/completions",
format: "openai"
},
cohere: {
baseUrl: "https://api.cohere.ai/v1/chat/completions",
format: "openai"
},
nebius: {
baseUrl: "https://api.studio.nebius.ai/v1/chat/completions",
format: "openai"
},
siliconflow: {
baseUrl: "https://api.siliconflow.cn/v1/chat/completions",
format: "openai"
},
hyperbolic: {
baseUrl: "https://api.hyperbolic.xyz/v1/chat/completions",
format: "openai"
},
deepgram: {
baseUrl: "https://api.deepgram.com/v1/listen",
format: "openai"
},
assemblyai: {
baseUrl: "https://api.assemblyai.com/v1/audio/transcriptions",
format: "openai"
},
nanobanana: {
baseUrl: "https://api.nanobananaapi.ai/v1/chat/completions",
format: "openai"
},
chutes: {
baseUrl: "https://llm.chutes.ai/v1/chat/completions",
format: "openai"
}
};

View File

@@ -34,9 +34,6 @@ export const PROVIDER_MODELS = {
gc: [ // Gemini CLI
{ id: "gemini-3-flash-preview", name: "Gemini 3 Flash Preview" },
{ id: "gemini-3-pro-preview", name: "Gemini 3 Pro Preview" },
{ id: "gemini-2.5-pro", name: "Gemini 2.5 Pro" },
{ id: "gemini-2.5-flash", name: "Gemini 2.5 Flash" },
{ id: "gemini-2.5-flash-lite", name: "Gemini 2.5 Flash Lite" },
],
qw: [ // Qwen Code
// { id: "qwen3-coder-next", name: "Qwen3 Coder Next" },

View File

@@ -21,11 +21,11 @@ export class AntigravityExecutor extends BaseExecutor {
return {
"Content-Type": "application/json",
"Authorization": `Bearer ${credentials.accessToken}`,
"User-Agent": this.config.headers?.["User-Agent"] || "antigravity/1.104.0 darwin/arm64",
"User-Agent": this.config.headers?.["User-Agent"] || ANTIGRAVITY_HEADERS['User-Agent'],
[INTERNAL_REQUEST_HEADER.name]: INTERNAL_REQUEST_HEADER.value,
...ANTIGRAVITY_HEADERS,
...(sessionId && { "X-Machine-Session-Id": sessionId }),
...(stream && { "Accept": "text/event-stream" })
"Accept": stream ? "text/event-stream" : "application/json"
};
}

View File

@@ -1,5 +1,5 @@
import { BaseExecutor } from "./base.js";
import { PROVIDERS, OAUTH_ENDPOINTS, HTTP_STATUS } from "../config/constants.js";
import { PROVIDERS, OAUTH_ENDPOINTS, HTTP_STATUS, GITHUB_COPILOT } from "../config/constants.js";
import { openaiToOpenAIResponsesRequest } from "../translator/request/openai-responses.js";
import { openaiResponsesToOpenAIResponse } from "../translator/response/openai-responses.js";
import { initState } from "../translator/index.js";
@@ -22,11 +22,11 @@ export class GithubExecutor extends BaseExecutor {
"Authorization": `Bearer ${token}`,
"Content-Type": "application/json",
"copilot-integration-id": "vscode-chat",
"editor-version": "vscode/1.107.1",
"editor-plugin-version": "copilot-chat/0.26.7",
"user-agent": "GitHubCopilotChat/0.26.7",
"editor-version": `vscode/${GITHUB_COPILOT.VSCODE_VERSION}`,
"editor-plugin-version": `copilot-chat/${GITHUB_COPILOT.COPILOT_CHAT_VERSION}`,
"user-agent": GITHUB_COPILOT.USER_AGENT,
"openai-intent": "conversation-panel",
"x-github-api-version": "2025-04-01",
"x-github-api-version": GITHUB_COPILOT.API_VERSION,
"x-request-id": crypto.randomUUID?.() || `${Date.now()}-${Math.random().toString(36).slice(2)}`,
"x-vscode-user-agent-library-version": "electron-fetch",
"X-Initiator": "user",
@@ -46,7 +46,7 @@ export class GithubExecutor extends BaseExecutor {
if (result.response.status === HTTP_STATUS.BAD_REQUEST) {
const errorBody = await result.response.clone().text();
if (errorBody.includes("not accessible via the /chat/completions endpoint")) {
log?.warn("GITHUB", `Model ${model} requires /responses. Switching...`);
this.knownCodexModels.add(model);
@@ -60,7 +60,7 @@ export class GithubExecutor extends BaseExecutor {
async executeWithResponsesEndpoint({ model, body, stream, credentials, signal, log }) {
const url = this.config.responsesUrl;
const headers = this.buildHeaders(credentials, stream);
const transformedBody = openaiToOpenAIResponsesRequest(model, body, stream, credentials);
log?.debug("GITHUB", "Sending translated request to /responses");
@@ -86,7 +86,7 @@ export class GithubExecutor extends BaseExecutor {
async transform(chunk, controller) {
buffer += decoder.decode(chunk, { stream: true });
const lines = buffer.split("\n");
buffer = lines.pop() || "";
for (const line of lines) {
@@ -110,13 +110,13 @@ export class GithubExecutor extends BaseExecutor {
},
flush(controller) {
if (buffer.trim()) {
const parsed = parseSSELine(buffer.trim());
if (parsed && !parsed.done) {
const converted = openaiResponsesToOpenAIResponse(parsed, state);
if (converted) {
controller.enqueue(new TextEncoder().encode(formatSSE(converted, "openai")));
}
}
const parsed = parseSSELine(buffer.trim());
if (parsed && !parsed.done) {
const converted = openaiResponsesToOpenAIResponse(parsed, state);
if (converted) {
controller.enqueue(new TextEncoder().encode(formatSSE(converted, "openai")));
}
}
}
}
});
@@ -140,13 +140,18 @@ export class GithubExecutor extends BaseExecutor {
const response = await fetch("https://api.github.com/copilot_internal/v2/token", {
headers: {
"Authorization": `token ${githubAccessToken}`,
"User-Agent": "GithubCopilot/1.0",
"Editor-Version": "vscode/1.100.0",
"Editor-Plugin-Version": "copilot/1.300.0",
"Accept": "application/json"
"User-Agent": GITHUB_COPILOT.USER_AGENT,
"Editor-Version": `vscode/${GITHUB_COPILOT.VSCODE_VERSION}`,
"Editor-Plugin-Version": `copilot-chat/${GITHUB_COPILOT.COPILOT_CHAT_VERSION}`,
"Accept": "application/json",
"x-github-api-version": GITHUB_COPILOT.API_VERSION
}
});
if (!response.ok) return null;
if (!response.ok) {
const errorText = await response.text();
log?.error?.("TOKEN", `Copilot token refresh failed: ${response.status} ${errorText}`);
return null;
}
const data = await response.json();
log?.info?.("TOKEN", "Copilot token refreshed");
return { token: data.token, expiresAt: data.expires_at };
@@ -180,7 +185,7 @@ export class GithubExecutor extends BaseExecutor {
async refreshCredentials(credentials, log) {
let copilotResult = await this.refreshCopilotToken(credentials.accessToken, log);
if (!copilotResult && credentials.refreshToken) {
const githubTokens = await this.refreshGitHubToken(credentials.refreshToken, log);
if (githubTokens?.accessToken) {
@@ -191,18 +196,18 @@ export class GithubExecutor extends BaseExecutor {
return githubTokens;
}
}
if (copilotResult) {
return { accessToken: credentials.accessToken, refreshToken: credentials.refreshToken, copilotToken: copilotResult.token, copilotTokenExpiresAt: copilotResult.expiresAt };
}
return null;
}
needsRefresh(credentials) {
// Always refresh if no copilotToken
if (!credentials.copilotToken) return true;
if (credentials.copilotTokenExpiresAt) {
// Handle both Unix timestamp (seconds) and ISO string
let expiresAtMs = credentials.copilotTokenExpiresAt;

View File

@@ -9,11 +9,39 @@ const ALIAS_TO_PROVIDER_ID = {
gh: "github",
kr: "kiro",
cu: "cursor",
// API Key providers (alias = id)
// API Key providers
openai: "openai",
anthropic: "anthropic",
gemini: "gemini",
openrouter: "openrouter",
glm: "glm",
kimi: "kimi",
minimax: "minimax",
"minimax-cn": "minimax-cn",
ds: "deepseek",
deepseek: "deepseek",
groq: "groq",
xai: "xai",
mistral: "mistral",
pplx: "perplexity",
perplexity: "perplexity",
together: "together",
fireworks: "fireworks",
cerebras: "cerebras",
cohere: "cohere",
nvidia: "nvidia",
nebius: "nebius",
siliconflow: "siliconflow",
hyp: "hyperbolic",
hyperbolic: "hyperbolic",
dg: "deepgram",
deepgram: "deepgram",
aai: "assemblyai",
assemblyai: "assemblyai",
nb: "nanobanana",
nanobanana: "nanobanana",
ch: "chutes",
chutes: "chutes",
cursor: "cursor",
};
@@ -42,7 +70,12 @@ export function parseModel(modelStr) {
}
// Alias format (model alias, not provider alias)
return { provider: null, model: modelStr, isAlias: true, providerAlias: null };
return {
provider: null,
model: modelStr,
isAlias: true,
providerAlias: null,
};
}
/**
@@ -51,29 +84,29 @@ export function parseModel(modelStr) {
*/
export function resolveModelAliasFromMap(alias, aliases) {
if (!aliases) return null;
// Check if alias exists
const resolved = aliases[alias];
if (!resolved) return null;
// Resolved value is "provider/model" format
if (typeof resolved === "string" && resolved.includes("/")) {
const firstSlash = resolved.indexOf("/");
const providerOrAlias = resolved.slice(0, firstSlash);
return {
provider: resolveProviderAlias(providerOrAlias),
model: resolved.slice(firstSlash + 1)
model: resolved.slice(firstSlash + 1),
};
}
// Or object { provider, model }
if (typeof resolved === "object" && resolved.provider && resolved.model) {
return {
provider: resolveProviderAlias(resolved.provider),
model: resolved.model
model: resolved.model,
};
}
return null;
}
@@ -93,9 +126,10 @@ export async function getModelInfoCore(modelStr, aliasesOrGetter) {
}
// Get aliases (from object or function)
const aliases = typeof aliasesOrGetter === "function"
? await aliasesOrGetter()
: aliasesOrGetter;
const aliases =
typeof aliasesOrGetter === "function"
? await aliasesOrGetter()
: aliasesOrGetter;
// Resolve alias
const resolved = resolveModelAliasFromMap(parsed.model, aliases);
@@ -103,10 +137,26 @@ export async function getModelInfoCore(modelStr, aliasesOrGetter) {
return resolved;
}
// Fallback: treat as openai model
// Fallback: infer provider from model name prefix
return {
provider: "openai",
model: parsed.model
provider: inferProviderFromModelName(parsed.model),
model: parsed.model,
};
}
/**
 * Infer a provider id from a model name's prefix.
 * Used as a fallback when no provider prefix or alias is given.
 * @param {string} modelName - Raw model identifier (e.g. "claude-3-opus").
 * @returns {string} Provider id; "openai" when nothing matches or input is empty.
 */
function inferProviderFromModelName(modelName) {
  if (!modelName) return "openai";
  const normalized = modelName.toLowerCase();
  // Ordered prefix → provider rules; first match wins.
  const prefixRules = [
    ["claude-", "anthropic"],
    ["gemini-", "gemini"],
    ["gpt-", "openai"],
    ["o1", "openai"],
    ["o3", "openai"],
    ["o4", "openai"],
    ["deepseek-", "openrouter"],
  ];
  for (const [prefix, provider] of prefixRules) {
    if (normalized.startsWith(prefix)) return provider;
  }
  // Default fallback
  return "openai";
}

View File

@@ -1,4 +1,4 @@
import { PROVIDERS, OAUTH_ENDPOINTS } from "../config/constants.js";
import { PROVIDERS, OAUTH_ENDPOINTS, GITHUB_COPILOT } from "../config/constants.js";
// Token expiry buffer (refresh if expires within 5 minutes)
export const TOKEN_EXPIRY_BUFFER_MS = 5 * 60 * 1000;
@@ -301,6 +301,7 @@ export async function refreshKiroToken(refreshToken, providerSpecificData, log)
headers: {
"Content-Type": "application/json",
Accept: "application/json",
"User-Agent": "kiro-cli/1.0.0",
},
body: JSON.stringify({
refreshToken: refreshToken,
@@ -425,10 +426,11 @@ export async function refreshCopilotToken(githubAccessToken, log) {
const response = await fetch("https://api.github.com/copilot_internal/v2/token", {
headers: {
"Authorization": `token ${githubAccessToken}`,
"User-Agent": "GithubCopilot/1.0",
"Editor-Version": "vscode/1.100.0",
"Editor-Plugin-Version": "copilot/1.300.0",
"Accept": "application/json"
"User-Agent": GITHUB_COPILOT.USER_AGENT,
"Editor-Version": `vscode/${GITHUB_COPILOT.VSCODE_VERSION}`,
"Editor-Plugin-Version": `copilot-chat/${GITHUB_COPILOT.COPILOT_CHAT_VERSION}`,
"Accept": "application/json",
"x-github-api-version": GITHUB_COPILOT.API_VERSION
}
});

View File

@@ -221,16 +221,26 @@ async function getAntigravityUsage(accessToken, providerSpecificData) {
"Authorization": `Bearer ${accessToken}`,
"User-Agent": ANTIGRAVITY_CONFIG.userAgent,
"Content-Type": "application/json",
"X-Client-Name": "antigravity",
"X-Client-Version": "1.107.0",
},
body: JSON.stringify({
...(projectId ? { project: projectId } : {}),
metadata: CLIENT_METADATA,
mode: 1
...(projectId ? { project: projectId } : {})
}),
});
if (response.status === 403) {
return { message: "Antigravity access forbidden. Check subscription." };
return {
message: "Antigravity quota API access forbidden. Chat may still work.",
quotas: {}
};
}
if (response.status === 401) {
return {
message: "Antigravity quota API authentication expired. Chat may still work.",
quotas: {}
};
}
if (!response.ok) {
@@ -470,6 +480,15 @@ async function getKiroUsage(accessToken, providerSpecificData) {
if (!response.ok) {
const errorText = await response.text();
// Handle authentication errors gracefully
if (response.status === 403 || response.status === 401) {
return {
message: "Kiro quota API authentication expired. Chat may still work.",
quotas: {}
};
}
throw new Error(`Kiro API error (${response.status}): ${errorText}`);
}

View File

@@ -5,8 +5,11 @@
"private": true,
"scripts": {
"dev": "next dev --webpack --port 20128",
"build": "next build --webpack",
"start": "next start"
"build": "NODE_ENV=production next build --webpack",
"start": "NODE_ENV=production next start",
"dev:bun": "bun --bun next dev --webpack --port 20128",
"build:bun": "NODE_ENV=production bun --bun next build --webpack",
"start:bun": "NODE_ENV=production bun ./.next/standalone/server.js"
},
"dependencies": {
"@monaco-editor/react": "^4.7.0",
@@ -37,6 +40,7 @@
"@tailwindcss/postcss": "^4.1.18",
"eslint": "^9",
"eslint-config-next": "16.1.6",
"postcss": "^8.5.6",
"tailwindcss": "^4"
}
}
}

View File

@@ -0,0 +1,4 @@
<svg xmlns="http://www.w3.org/2000/svg" width="192" height="192" viewBox="0 0 192 192">
<rect width="192" height="192" rx="24" fill="#0a0a0a"/>
<text x="96" y="120" font-family="Arial, sans-serif" font-size="80" font-weight="bold" fill="#ffffff" text-anchor="middle">9R</text>
</svg>

After

Width:  |  Height:  |  Size: 288 B

View File

@@ -0,0 +1,4 @@
<svg xmlns="http://www.w3.org/2000/svg" width="512" height="512" viewBox="0 0 512 512">
<rect width="512" height="512" rx="64" fill="#0a0a0a"/>
<text x="256" y="320" font-family="Arial, sans-serif" font-size="200" font-weight="bold" fill="#ffffff" text-anchor="middle">9R</text>
</svg>

After

Width:  |  Height:  |  Size: 290 B

BIN
public/providers/chutes.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.6 KiB

22
public/sw.js Normal file
View File

@@ -0,0 +1,22 @@
// Service worker: Web Push notifications and notification-click handling.

// Show a notification when a push message carrying a payload arrives.
self.addEventListener('push', (event) => {
  if (!event.data) return
  const payload = event.data.json()
  event.waitUntil(
    self.registration.showNotification(payload.title, {
      body: payload.body,
      icon: payload.icon || '/icons/icon-192.svg',
      badge: '/icons/icon-192.svg',
      vibrate: [100, 50, 100],
      data: {
        dateOfArrival: Date.now(),
        primaryKey: '2',
      },
    })
  )
})

// On click: dismiss the notification and open the app's root page.
self.addEventListener('notificationclick', (event) => {
  console.log('Notification click received.')
  event.notification.close()
  event.waitUntil(clients.openWindow('/'))
})

View File

@@ -1,6 +1,6 @@
"use client";
import { useState, Suspense, useEffect } from "react";
import { Suspense, useState } from "react";
import { useSearchParams, useRouter } from "next/navigation";
import { UsageStats, RequestLogger, CardSkeleton, SegmentedControl } from "@/shared/components";
import ProviderLimits from "./components/ProviderLimits";
@@ -9,28 +9,25 @@ import RequestDetailsTab from "./components/RequestDetailsTab";
export default function UsagePage() {
return (
<Suspense fallback={<CardSkeleton />}>
<UsagePageContent />
<UsageContent />
</Suspense>
);
}
function UsagePageContent() {
function UsageContent() {
const searchParams = useSearchParams();
const router = useRouter();
const [activeTab, setActiveTab] = useState(searchParams.get("tab") || "overview");
const [tabLoading, setTabLoading] = useState(false);
useEffect(() => {
const tabFromUrl = searchParams.get("tab");
if (tabFromUrl && ["overview", "logs", "limits", "details"].includes(tabFromUrl)) {
setActiveTab(tabFromUrl);
}
}, [searchParams]);
const tabFromUrl = searchParams.get("tab");
const activeTab = tabFromUrl && ["overview", "logs", "limits", "details"].includes(tabFromUrl)
? tabFromUrl
: "overview";
const handleTabChange = (value) => {
if (value === activeTab) return;
setTabLoading(true);
setActiveTab(value);
const params = new URLSearchParams(searchParams);
params.set("tab", value);
router.push(`/dashboard/usage?${params.toString()}`, { scroll: false });
@@ -71,3 +68,4 @@ function UsagePageContent() {
</div>
);
}

View File

@@ -1,6 +1,48 @@
import { NextResponse } from "next/server";
import { getProviderConnectionById } from "@/models";
import { isOpenAICompatibleProvider, isAnthropicCompatibleProvider } from "@/shared/constants/providers";
import { KiroService } from "@/lib/oauth/services/kiro";
import { GEMINI_CONFIG } from "@/lib/oauth/constants/oauth";
import { refreshGoogleToken, updateProviderCredentials } from "@/sse/services/tokenRefresh";
const GEMINI_CLI_MODELS_URL = "https://cloudcode-pa.googleapis.com/v1internal:fetchAvailableModels";
/**
 * Normalize an OpenAI-style /models payload into a plain array.
 * Accepts either a bare array or an object exposing the list under
 * `data`, `models`, or `results` (first truthy candidate wins).
 * @param {*} data - Raw response body from a provider's models endpoint.
 * @returns {Array} Model entries, or an empty array when unrecognizable.
 */
const parseOpenAIStyleModels = (data) => {
  if (Array.isArray(data)) return data;
  for (const key of ["data", "models", "results"]) {
    const candidate = data?.[key];
    if (candidate) return candidate;
  }
  return [];
};
/**
 * Normalize the Gemini CLI fetchAvailableModels payload.
 * Handles both observed response shapes: `models` as an array of entries,
 * or `models` as an id-keyed object (entries flagged `isInternal` are skipped).
 * @param {*} data - Raw response body.
 * @returns {Array<{id: string, name: string}>} Normalized model list.
 */
const parseGeminiCliModels = (data) => {
  const models = data?.models;
  if (Array.isArray(models)) {
    // Array shape: each entry carries its id under one of several keys;
    // entries with no usable id are dropped.
    return models.flatMap((item) => {
      const id = item?.id || item?.model || item?.name;
      if (!id) return [];
      return [{ id, name: item?.displayName || item?.name || id }];
    });
  }
  if (models && typeof models === "object") {
    // Object shape: keys are model ids; exclude internal-only models.
    const normalized = [];
    for (const [id, info] of Object.entries(models)) {
      if (info?.isInternal) continue;
      normalized.push({ id, name: info?.displayName || info?.name || id });
    }
    return normalized;
  }
  return [];
};
/**
 * Build a models-endpoint config entry for an OpenAI-compatible provider.
 * @param {string} url - Fully-qualified /models endpoint URL.
 * @returns {object} Config object consumed by the generic model-fetching path
 *   (GET with JSON content type, Bearer auth, OpenAI-style response parsing).
 */
const createOpenAIModelsConfig = (url) => {
  return {
    url,
    method: "GET",
    headers: { "Content-Type": "application/json" },
    authHeader: "Authorization",
    authPrefix: "Bearer ",
    parseResponse: parseOpenAIStyleModels,
  };
};
// Provider models endpoints configuration
const PROVIDER_MODELS_CONFIG = {
@@ -21,14 +63,6 @@ const PROVIDER_MODELS_CONFIG = {
authQuery: "key", // Use query param for API key
parseResponse: (data) => data.models || []
},
"gemini-cli": {
url: "https://generativelanguage.googleapis.com/v1beta/models",
method: "GET",
headers: { "Content-Type": "application/json" },
authHeader: "Authorization",
authPrefix: "Bearer ",
parseResponse: (data) => data.models || []
},
qwen: {
url: "https://portal.qwen.ai/v1/models",
method: "GET",
@@ -46,22 +80,35 @@ const PROVIDER_MODELS_CONFIG = {
body: {},
parseResponse: (data) => data.models || []
},
openai: {
url: "https://api.openai.com/v1/models",
github: {
url: "https://api.githubcopilot.com/models",
method: "GET",
headers: { "Content-Type": "application/json" },
headers: {
"Content-Type": "application/json",
"Copilot-Integration-Id": "vscode-chat",
"editor-version": "vscode/1.107.1",
"editor-plugin-version": "copilot-chat/0.26.7",
"user-agent": "GitHubCopilotChat/0.26.7"
},
authHeader: "Authorization",
authPrefix: "Bearer ",
parseResponse: (data) => data.data || []
},
openrouter: {
url: "https://openrouter.ai/api/v1/models",
method: "GET",
headers: { "Content-Type": "application/json" },
authHeader: "Authorization",
authPrefix: "Bearer ",
parseResponse: (data) => data.data || []
parseResponse: (data) => {
if (!data?.data) return [];
// Filter out embeddings, non-chat models, and disabled models
return data.data
.filter(m => m.capabilities?.type === "chat")
.filter(m => m.policy?.state !== "disabled") // Only return explicitly enabled models
.map(m => ({
id: m.id,
name: m.name || m.id,
version: m.version,
capabilities: m.capabilities,
isDefault: m.model_picker_enabled === true
}));
}
},
openai: createOpenAIModelsConfig("https://api.openai.com/v1/models"),
openrouter: createOpenAIModelsConfig("https://openrouter.ai/api/v1/models"),
anthropic: {
url: "https://api.anthropic.com/v1/models",
method: "GET",
@@ -71,7 +118,25 @@ const PROVIDER_MODELS_CONFIG = {
},
authHeader: "x-api-key",
parseResponse: (data) => data.data || []
}
},
// OpenAI-compatible API key providers
deepseek: createOpenAIModelsConfig("https://api.deepseek.com/models"),
groq: createOpenAIModelsConfig("https://api.groq.com/openai/v1/models"),
xai: createOpenAIModelsConfig("https://api.x.ai/v1/models"),
mistral: createOpenAIModelsConfig("https://api.mistral.ai/v1/models"),
perplexity: createOpenAIModelsConfig("https://api.perplexity.ai/models"),
together: createOpenAIModelsConfig("https://api.together.xyz/v1/models"),
fireworks: createOpenAIModelsConfig("https://api.fireworks.ai/inference/v1/models"),
cerebras: createOpenAIModelsConfig("https://api.cerebras.ai/v1/models"),
cohere: createOpenAIModelsConfig("https://api.cohere.ai/v1/models"),
nebius: createOpenAIModelsConfig("https://api.studio.nebius.ai/v1/models"),
siliconflow: createOpenAIModelsConfig("https://api.siliconflow.cn/v1/models"),
hyperbolic: createOpenAIModelsConfig("https://api.hyperbolic.xyz/v1/models"),
nanobanana: createOpenAIModelsConfig("https://api.nanobananaapi.ai/v1/models"),
chutes: createOpenAIModelsConfig("https://llm.chutes.ai/v1/models"),
nvidia: createOpenAIModelsConfig("https://integrate.api.nvidia.com/v1/models"),
assemblyai: createOpenAIModelsConfig("https://api.assemblyai.com/v1/models")
};
/**
@@ -124,12 +189,12 @@ export async function GET(request, { params }) {
if (!baseUrl) {
return NextResponse.json({ error: "No base URL configured for Anthropic compatible provider" }, { status: 400 });
}
baseUrl = baseUrl.replace(/\/$/, "");
if (baseUrl.endsWith("/messages")) {
baseUrl = baseUrl.slice(0, -9);
}
const url = `${baseUrl}/models`;
const response = await fetch(url, {
method: "GET",
@@ -160,6 +225,96 @@ export async function GET(request, { params }) {
});
}
// Kiro: Try dynamic model fetching first
if (connection.provider === "kiro") {
try {
const kiroService = new KiroService();
const profileArn = connection.providerSpecificData?.profileArn;
const accessToken = connection.accessToken;
if (accessToken && profileArn) {
const models = await kiroService.listAvailableModels(accessToken, profileArn);
return NextResponse.json({
provider: connection.provider,
connectionId: connection.id,
models
});
}
} catch (error) {
console.log("Failed to fetch Kiro models dynamically, falling back to static:", error.message);
}
}
if (connection.provider === "gemini-cli") {
const { accessToken, refreshToken } = connection;
if (!accessToken) {
return NextResponse.json({ error: "No valid token found" }, { status: 401 });
}
const projectId = connection.projectId || connection.providerSpecificData?.projectId;
const body = projectId ? { project: projectId } : {};
const fetchModels = async (token) => {
const response = await fetch(GEMINI_CLI_MODELS_URL, {
method: "POST",
headers: {
"Content-Type": "application/json",
"Authorization": `Bearer ${token}`,
"User-Agent": "google-api-nodejs-client/9.15.1",
"X-Goog-Api-Client": "google-cloud-sdk vscode_cloudshelleditor/0.1"
},
body: JSON.stringify(body)
});
return response;
};
let warning;
try {
let response = await fetchModels(accessToken);
// Attempt refresh on 401/403 when refresh token exists
if (!response.ok && (response.status === 401 || response.status === 403) && refreshToken) {
const refreshed = await refreshGoogleToken(refreshToken, GEMINI_CONFIG.clientId, GEMINI_CONFIG.clientSecret);
if (refreshed?.accessToken) {
await updateProviderCredentials(connection.id, {
accessToken: refreshed.accessToken,
refreshToken: refreshed.refreshToken,
expiresIn: refreshed.expiresIn,
});
response = await fetchModels(refreshed.accessToken);
}
}
if (response.ok) {
const data = await response.json();
const models = parseGeminiCliModels(data);
if (models.length > 0) {
return NextResponse.json({
provider: connection.provider,
connectionId: connection.id,
models
});
}
} else {
const errorText = await response.text();
warning = `Failed to fetch Gemini CLI models: ${response.status} ${errorText}`;
console.log("Failed to fetch Gemini CLI models dynamically, falling back to static:", errorText);
}
} catch (error) {
warning = `Failed to fetch Gemini CLI models: ${error.message}`;
console.log("Failed to fetch Gemini CLI models dynamically, falling back to static:", error.message);
}
// Return empty dynamic list so UI falls back to static provider models.
return NextResponse.json({
provider: connection.provider,
connectionId: connection.id,
models: [],
warning,
});
}
const config = PROVIDER_MODELS_CONFIG[connection.provider];
if (!config) {
return NextResponse.json(
@@ -169,7 +324,7 @@ export async function GET(request, { params }) {
}
// Get auth token
const token = connection.accessToken || connection.apiKey;
const token = connection.providerSpecificData?.copilotToken || connection.accessToken || connection.apiKey;
if (!token) {
return NextResponse.json({ error: "No valid token found" }, { status: 401 });
}

View File

@@ -30,7 +30,18 @@ export async function PUT(request, { params }) {
try {
const { id } = await params;
const body = await request.json();
const { name, priority, globalPriority, defaultModel, isActive, apiKey, testStatus, lastError, lastErrorAt } = body;
const {
name,
priority,
globalPriority,
defaultModel,
isActive,
apiKey,
testStatus,
lastError,
lastErrorAt,
providerSpecificData
} = body;
const existing = await getProviderConnectionById(id);
if (!existing) {
@@ -47,6 +58,12 @@ export async function PUT(request, { params }) {
if (testStatus !== undefined) updateData.testStatus = testStatus;
if (lastError !== undefined) updateData.lastError = lastError;
if (lastErrorAt !== undefined) updateData.lastErrorAt = lastErrorAt;
if (providerSpecificData !== undefined) {
updateData.providerSpecificData = {
...(existing.providerSpecificData || {}),
...providerSpecificData,
};
}
const updated = await updateProviderConnection(id, updateData);

View File

@@ -281,6 +281,58 @@ async function testApiKeyConnection(connection) {
const res = await fetch("https://api.x.ai/v1/models", { headers: { Authorization: `Bearer ${connection.apiKey}` } });
return { valid: res.ok, error: res.ok ? null : "Invalid API key" };
}
case "nvidia": {
const res = await fetch("https://integrate.api.nvidia.com/v1/models", { headers: { Authorization: `Bearer ${connection.apiKey}` } });
return { valid: res.ok, error: res.ok ? null : "Invalid API key" };
}
case "perplexity": {
const res = await fetch("https://api.perplexity.ai/models", { headers: { Authorization: `Bearer ${connection.apiKey}` } });
return { valid: res.ok, error: res.ok ? null : "Invalid API key" };
}
case "together": {
const res = await fetch("https://api.together.xyz/v1/models", { headers: { Authorization: `Bearer ${connection.apiKey}` } });
return { valid: res.ok, error: res.ok ? null : "Invalid API key" };
}
case "fireworks": {
const res = await fetch("https://api.fireworks.ai/inference/v1/models", { headers: { Authorization: `Bearer ${connection.apiKey}` } });
return { valid: res.ok, error: res.ok ? null : "Invalid API key" };
}
case "cerebras": {
const res = await fetch("https://api.cerebras.ai/v1/models", { headers: { Authorization: `Bearer ${connection.apiKey}` } });
return { valid: res.ok, error: res.ok ? null : "Invalid API key" };
}
case "cohere": {
const res = await fetch("https://api.cohere.ai/v1/models", { headers: { Authorization: `Bearer ${connection.apiKey}` } });
return { valid: res.ok, error: res.ok ? null : "Invalid API key" };
}
case "nebius": {
const res = await fetch("https://api.studio.nebius.ai/v1/models", { headers: { Authorization: `Bearer ${connection.apiKey}` } });
return { valid: res.ok, error: res.ok ? null : "Invalid API key" };
}
case "siliconflow": {
const res = await fetch("https://api.siliconflow.cn/v1/models", { headers: { Authorization: `Bearer ${connection.apiKey}` } });
return { valid: res.ok, error: res.ok ? null : "Invalid API key" };
}
case "hyperbolic": {
const res = await fetch("https://api.hyperbolic.xyz/v1/models", { headers: { Authorization: `Bearer ${connection.apiKey}` } });
return { valid: res.ok, error: res.ok ? null : "Invalid API key" };
}
case "deepgram": {
const res = await fetch("https://api.deepgram.com/v1/projects", { headers: { Authorization: `Token ${connection.apiKey}` } });
return { valid: res.ok, error: res.ok ? null : "Invalid API key" };
}
case "assemblyai": {
const res = await fetch("https://api.assemblyai.com/v1/account", { headers: { Authorization: `Bearer ${connection.apiKey}` } });
return { valid: res.ok, error: res.ok ? null : "Invalid API key" };
}
case "nanobanana": {
const res = await fetch("https://api.nanobananaapi.ai/v1/models", { headers: { Authorization: `Bearer ${connection.apiKey}` } });
return { valid: res.ok, error: res.ok ? null : "Invalid API key" };
}
case "chutes": {
const res = await fetch("https://llm.chutes.ai/v1/models", { headers: { Authorization: `Bearer ${connection.apiKey}` } });
return { valid: res.ok, error: res.ok ? null : "Invalid API key" };
}
default:
return { valid: false, error: "Provider test not supported" };
}

View File

@@ -38,22 +38,22 @@ export async function POST(request) {
if (!node) {
return NextResponse.json({ error: "Anthropic Compatible node not found" }, { status: 404 });
}
let normalizedBase = node.baseUrl?.trim().replace(/\/$/, "") || "";
if (normalizedBase.endsWith("/messages")) {
normalizedBase = normalizedBase.slice(0, -9); // remove /messages
}
const modelsUrl = `${normalizedBase}/models`;
const res = await fetch(modelsUrl, {
headers: {
headers: {
"x-api-key": apiKey,
"anthropic-version": "2023-06-01",
"Authorization": `Bearer ${apiKey}`
"Authorization": `Bearer ${apiKey}`
},
});
isValid = res.ok;
return NextResponse.json({
valid: isValid,
@@ -145,8 +145,57 @@ export async function POST(request) {
break;
}
default:
return NextResponse.json({ error: "Provider validation not supported" }, { status: 400 });
case "deepseek":
case "groq":
case "xai":
case "mistral":
case "perplexity":
case "together":
case "fireworks":
case "cerebras":
case "cohere":
case "nebius":
case "siliconflow":
case "hyperbolic":
case "assemblyai":
case "nanobanana":
case "chutes":
case "nvidia": {
const endpoints = {
deepseek: "https://api.deepseek.com/models",
groq: "https://api.groq.com/openai/v1/models",
xai: "https://api.x.ai/v1/models",
mistral: "https://api.mistral.ai/v1/models",
perplexity: "https://api.perplexity.ai/models",
together: "https://api.together.xyz/v1/models",
fireworks: "https://api.fireworks.ai/inference/v1/models",
cerebras: "https://api.cerebras.ai/v1/models",
cohere: "https://api.cohere.ai/v1/models",
nebius: "https://api.studio.nebius.ai/v1/models",
siliconflow: "https://api.siliconflow.cn/v1/models",
hyperbolic: "https://api.hyperbolic.xyz/v1/models",
assemblyai: "https://api.assemblyai.com/v1/account",
nanobanana: "https://api.nanobananaapi.ai/v1/models",
chutes: "https://llm.chutes.ai/v1/models",
nvidia: "https://integrate.api.nvidia.com/v1/models"
};
const res = await fetch(endpoints[provider], {
headers: { "Authorization": `Bearer ${apiKey}` },
});
isValid = res.ok;
break;
}
case "deepgram": {
const res = await fetch("https://api.deepgram.com/v1/projects", {
headers: { "Authorization": `Token ${apiKey}` },
});
isValid = res.ok;
break;
}
default:
return NextResponse.json({ error: "Provider validation not supported" }, { status: 400 });
}
} catch (err) {
error = err.message;

View File

@@ -7,12 +7,12 @@ import { getExecutor } from "open-sse/executors/index.js";
*/
async function refreshAndUpdateCredentials(connection) {
const executor = getExecutor(connection.provider);
// Build credentials object from connection
const credentials = {
accessToken: connection.accessToken,
refreshToken: connection.refreshToken,
expiresAt: connection.tokenExpiresAt,
expiresAt: connection.expiresAt || connection.tokenExpiresAt,
providerSpecificData: connection.providerSpecificData,
// For GitHub
copilotToken: connection.providerSpecificData?.copilotToken,
@@ -21,7 +21,7 @@ async function refreshAndUpdateCredentials(connection) {
// Check if refresh is needed
const needsRefresh = executor.needsRefresh(credentials);
if (!needsRefresh) {
return { connection, refreshed: false };
}
@@ -55,9 +55,9 @@ async function refreshAndUpdateCredentials(connection) {
// Update token expiry
if (refreshResult.expiresIn) {
updateData.tokenExpiresAt = new Date(Date.now() + refreshResult.expiresIn * 1000).toISOString();
updateData.expiresAt = new Date(Date.now() + refreshResult.expiresIn * 1000).toISOString();
} else if (refreshResult.expiresAt) {
updateData.tokenExpiresAt = refreshResult.expiresAt;
updateData.expiresAt = refreshResult.expiresAt;
}
// Handle provider-specific data (copilotToken for GitHub, etc.)
@@ -77,7 +77,7 @@ async function refreshAndUpdateCredentials(connection) {
...connection,
...updateData,
};
return {
connection: updatedConnection,
refreshed: true,
@@ -90,7 +90,7 @@ async function refreshAndUpdateCredentials(connection) {
export async function GET(request, { params }) {
try {
const { connectionId } = await params;
// Get connection from database
let connection = await getProviderConnectionById(connectionId);
if (!connection) {
@@ -108,8 +108,8 @@ export async function GET(request, { params }) {
connection = result.connection;
} catch (refreshError) {
console.error("[Usage API] Credential refresh failed:", refreshError);
return Response.json({
error: `Credential refresh failed: ${refreshError.message}`
return Response.json({
error: `Credential refresh failed: ${refreshError.message}`
}, { status: 401 });
}

View File

@@ -1,4 +1,5 @@
import { PROVIDER_MODELS, PROVIDER_ID_TO_ALIAS } from "@/shared/constants/models";
import { getProviderAlias } from "@/shared/constants/providers";
import { getProviderConnections, getCombos } from "@/lib/localDb";
/**
@@ -39,11 +40,12 @@ export async function GET() {
console.log("Could not fetch combos");
}
// Build set of active provider aliases
const activeAliases = new Set();
// Build first active connection per provider (connections already sorted by priority)
const activeConnectionByProvider = new Map();
for (const conn of connections) {
const alias = PROVIDER_ID_TO_ALIAS[conn.provider] || conn.provider;
activeAliases.add(alias);
if (!activeConnectionByProvider.has(conn.provider)) {
activeConnectionByProvider.set(conn.provider, conn);
}
}
// Collect models from active providers (or all if none active)
@@ -64,22 +66,68 @@ export async function GET() {
}
// Add provider models
for (const [alias, providerModels] of Object.entries(PROVIDER_MODELS)) {
// If we have active providers, only include those; otherwise include all
if (connections.length > 0 && !activeAliases.has(alias)) {
continue;
if (connections.length === 0) {
// DB unavailable or no active providers -> return all static models
for (const [alias, providerModels] of Object.entries(PROVIDER_MODELS)) {
for (const model of providerModels) {
models.push({
id: `${alias}/${model.id}`,
object: "model",
created: timestamp,
owned_by: alias,
permission: [],
root: model.id,
parent: null,
});
}
}
} else {
for (const [providerId, conn] of activeConnectionByProvider.entries()) {
const staticAlias = PROVIDER_ID_TO_ALIAS[providerId] || providerId;
const outputAlias = getProviderAlias(providerId) || staticAlias;
const providerModels = PROVIDER_MODELS[staticAlias] || [];
const enabledModels = conn?.providerSpecificData?.enabledModels;
const hasExplicitEnabledModels =
Array.isArray(enabledModels) && enabledModels.length > 0;
for (const model of providerModels) {
models.push({
id: `${alias}/${model.id}`,
object: "model",
created: timestamp,
owned_by: alias,
permission: [],
root: model.id,
parent: null,
});
// Default: if no explicit selection, all static models are active.
// If explicit selection exists, expose exactly those model IDs (including non-static IDs).
const rawModelIds = hasExplicitEnabledModels
? Array.from(
new Set(
enabledModels.filter(
(modelId) => typeof modelId === "string" && modelId.trim() !== "",
),
),
)
: providerModels.map((model) => model.id);
const modelIds = rawModelIds
.map((modelId) => {
if (modelId.startsWith(`${outputAlias}/`)) {
return modelId.slice(outputAlias.length + 1);
}
if (modelId.startsWith(`${staticAlias}/`)) {
return modelId.slice(staticAlias.length + 1);
}
if (modelId.startsWith(`${providerId}/`)) {
return modelId.slice(providerId.length + 1);
}
return modelId;
})
.filter((modelId) => typeof modelId === "string" && modelId.trim() !== "");
for (const modelId of modelIds) {
models.push({
id: `${outputAlias}/${modelId}`,
object: "model",
created: timestamp,
owned_by: outputAlias,
permission: [],
root: modelId,
parent: null,
});
}
}
}

View File

@@ -16,6 +16,10 @@ export const metadata = {
},
};
export const viewport = {
themeColor: "#0a0a0a",
};
export default function RootLayout({ children }) {
return (
<html lang="en" suppressHydrationWarning>

30
src/app/manifest.js Normal file
View File

@@ -0,0 +1,30 @@
/**
 * Next.js PWA manifest route — served as /manifest.webmanifest.
 * Declares app identity, standalone display, and SVG icons
 * (the 512px icon is listed twice: once plain, once maskable).
 */
export default function manifest() {
  const themeColor = '#0a0a0a';
  // Small helper so each icon entry shares the common SVG mime type.
  const svgIcon = (src, sizes, extra = {}) => ({
    src,
    sizes,
    type: 'image/svg+xml',
    ...extra,
  });

  return {
    name: '9Router - AI Infrastructure Management',
    short_name: '9Router',
    description: 'One endpoint for all your AI providers. Manage keys, monitor usage, and scale effortlessly.',
    start_url: '/',
    display: 'standalone',
    background_color: themeColor,
    theme_color: themeColor,
    orientation: 'portrait-primary',
    icons: [
      svgIcon('/icons/icon-192.svg', '192x192'),
      svgIcon('/icons/icon-512.svg', '512x512'),
      svgIcon('/icons/icon-512.svg', '512x512', { purpose: 'maskable' }),
    ],
  };
}

View File

@@ -18,7 +18,7 @@ export class KiroService {
*/
async registerClient(region = "us-east-1") {
const endpoint = `https://oidc.${region}.amazonaws.com/client/register`;
const response = await fetch(endpoint, {
method: "POST",
headers: {
@@ -51,7 +51,7 @@ export class KiroService {
*/
async startDeviceAuthorization(clientId, clientSecret, startUrl, region = "us-east-1") {
const endpoint = `https://oidc.${region}.amazonaws.com/device_authorization`;
const response = await fetch(endpoint, {
method: "POST",
headers: {
@@ -85,7 +85,7 @@ export class KiroService {
*/
async pollDeviceToken(clientId, clientSecret, deviceCode, region = "us-east-1") {
const endpoint = `https://oidc.${region}.amazonaws.com/token`;
const response = await fetch(endpoint, {
method: "POST",
headers: {
@@ -141,7 +141,7 @@ export class KiroService {
async exchangeSocialCode(code, codeVerifier) {
// Must match the redirect_uri used in buildSocialLoginUrl
const redirectUri = "kiro://kiro.kiroAgent/authenticate-success";
const response = await fetch(`${KIRO_AUTH_SERVICE}/oauth/token`, {
method: "POST",
headers: {
@@ -177,7 +177,7 @@ export class KiroService {
// AWS SSO OIDC refresh (Builder ID or IDC)
if (clientId && clientSecret) {
const endpoint = `https://oidc.${region || "us-east-1"}.amazonaws.com/token`;
const response = await fetch(endpoint, {
method: "POST",
headers: {
@@ -253,6 +253,43 @@ export class KiroService {
}
}
/**
* List available models from CodeWhisperer API
*/
async listAvailableModels(accessToken, profileArn) {
const endpoint = "https://codewhisperer.us-east-1.amazonaws.com";
const target = "AmazonCodeWhispererService.ListAvailableModels";
const response = await fetch(endpoint, {
method: "POST",
headers: {
"Content-Type": "application/x-amz-json-1.0",
"x-amz-target": target,
"Authorization": `Bearer ${accessToken}`,
"Accept": "application/json",
},
body: JSON.stringify({
origin: "AI_EDITOR",
profileArn,
}),
});
if (!response.ok) {
const error = await response.text();
throw new Error(`Failed to list models: ${error}`);
}
const data = await response.json();
return (data.models || []).map(m => ({
id: m.modelId,
name: m.modelName || m.modelId,
description: m.description,
rateMultiplier: m.rateMultiplier,
rateUnit: m.rateUnit,
maxInputTokens: m.tokenLimits?.maxInputTokens || 0,
}));
}
/**
* Fetch user email from access token (optional, for display)
*/

View File

@@ -1,4 +1,5 @@
const { spawn, exec } = require("child_process");
const cp = require("child_process");
const { exec } = cp;
const path = require("path");
const fs = require("fs");
const os = require("os");
@@ -65,11 +66,11 @@ function isProcessAlive(pid) {
function killProcess(pid, force = false) {
if (IS_WIN) {
const flag = force ? "/F " : "";
exec(`taskkill ${flag}/PID ${pid}`, () => {});
exec(`taskkill ${flag}/PID ${pid}`, () => { });
} else {
// Use pkill to kill entire process group (catches sudo + child node process)
const sig = force ? "SIGKILL" : "SIGTERM";
exec(`pkill -${sig} -P ${pid} 2>/dev/null; kill -${sig} ${pid} 2>/dev/null`, () => {});
exec(`pkill -${sig} -P ${pid} 2>/dev/null; kill -${sig} ${pid} 2>/dev/null`, () => { });
}
}
@@ -231,9 +232,9 @@ async function killLeftoverMitm(sudoPassword) {
const escaped = SERVER_PATH.replace(/'/g, "'\\''");
if (sudoPassword) {
const { execWithPassword } = require("./dns/dnsConfig");
await execWithPassword(`pkill -SIGKILL -f "${escaped}" 2>/dev/null || true`, sudoPassword).catch(() => {});
await execWithPassword(`pkill -SIGKILL -f "${escaped}" 2>/dev/null || true`, sudoPassword).catch(() => { });
} else {
exec(`pkill -SIGKILL -f "${escaped}" 2>/dev/null || true`, () => {});
exec(`pkill -SIGKILL -f "${escaped}" 2>/dev/null || true`, () => { });
}
await new Promise(r => setTimeout(r, 500));
} catch { /* ignore */ }
@@ -378,7 +379,7 @@ async function startMitm(apiKey, sudoPassword) {
const certAlreadyInstalled = settings.mitmCertInstalled && fs.existsSync(certPath);
if (!certAlreadyInstalled) {
await installCert(sudoPassword, certPath);
if (_updateSettings) await _updateSettings({ mitmCertInstalled: true }).catch(() => {});
if (_updateSettings) await _updateSettings({ mitmCertInstalled: true }).catch(() => { });
}
// 3. Add DNS entry

View File

@@ -74,13 +74,13 @@ export default function KiroAuthModal({ isOpen, onMethodSelect, onClose }) {
});
const data = await res.json();
if (!res.ok) {
throw new Error(data.error || "Import failed");
}
// Success - close modal
onClose();
// Success - notify parent to refresh connections
onMethodSelect("import");
} catch (err) {
setError(err.message);
} finally {

View File

@@ -46,6 +46,7 @@ export const APIKEY_PROVIDERS = {
deepgram: { id: "deepgram", alias: "dg", name: "Deepgram", icon: "mic", color: "#13EF93", textIcon: "DG", website: "https://deepgram.com" },
assemblyai: { id: "assemblyai", alias: "aai", name: "AssemblyAI", icon: "record_voice_over", color: "#0062FF", textIcon: "AA", website: "https://assemblyai.com" },
nanobanana: { id: "nanobanana", alias: "nb", name: "NanoBanana", icon: "image", color: "#FFD700", textIcon: "NB", website: "https://nanobananaapi.ai" },
chutes: { id: "chutes", alias: "ch", name: "Chutes AI", icon: "water_drop", color: "#5B6EF5", textIcon: "CH", website: "https://chutes.ai" },
};
export const OPENAI_COMPATIBLE_PREFIX = "openai-compatible-";

View File

@@ -28,7 +28,7 @@ export async function handleChat(request, clientRawRequest = null) {
log.warn("CHAT", "Invalid JSON body");
return errorResponse(HTTP_STATUS.BAD_REQUEST, "Invalid JSON body");
}
// Build clientRawRequest for logging (if not provided)
if (!clientRawRequest) {
const url = new URL(request.url);
@@ -42,7 +42,7 @@ export async function handleChat(request, clientRawRequest = null) {
// Log request endpoint and model
const url = new URL(request.url);
const modelStr = body.model;
// Count messages (support both messages[] and input[] formats)
const msgCount = body.messages?.length || body.input?.length || 0;
const toolCount = body.tools?.length || 0;
@@ -99,7 +99,19 @@ export async function handleChat(request, clientRawRequest = null) {
*/
async function handleSingleModelChat(body, modelStr, clientRawRequest = null, request = null, apiKey = null) {
const modelInfo = await getModelInfo(modelStr);
// If provider is null, this might be a combo name - check and handle
if (!modelInfo.provider) {
const comboModels = await getComboModels(modelStr);
if (comboModels) {
log.info("CHAT", `Combo "${modelStr}" with ${comboModels.length} models`);
return handleComboChat({
body,
models: comboModels,
handleSingleModel: (b, m) => handleSingleModelChat(b, m, clientRawRequest, request, apiKey, forceSourceFormat),
log
});
}
log.warn("CHAT", "Invalid model format", { model: modelStr });
return errorResponse(HTTP_STATUS.BAD_REQUEST, "Invalid model format");
}
@@ -178,12 +190,12 @@ async function handleSingleModelChat(body, modelStr, clientRawRequest = null, re
await clearAccountError(credentials.connectionId, credentials);
}
});
if (result.success) return result.response;
// Mark account unavailable (auto-calculates cooldown with exponential backoff)
const { shouldFallback } = await markAccountUnavailable(credentials.connectionId, result.status, result.error, provider, model);
if (shouldFallback) {
log.warn("AUTH", `Account ${accountId}... unavailable (${result.status}), trying fallback`);
excludeConnectionId = credentials.connectionId;

View File

@@ -268,11 +268,11 @@ export async function markAccountUnavailable(connectionId, status, errorText, pr
export async function clearAccountError(connectionId, currentConnection) {
// Only update if currently has error status
const hasError = currentConnection.testStatus === "unavailable" ||
currentConnection.lastError ||
currentConnection.rateLimitedUntil;
currentConnection.lastError ||
currentConnection.rateLimitedUntil;
if (!hasError) return; // Skip if already clean
await updateProviderConnection(connectionId, {
testStatus: "active",
lastError: null,
@@ -280,17 +280,25 @@ export async function clearAccountError(connectionId, currentConnection) {
rateLimitedUntil: null,
backoffLevel: 0
});
log.info("AUTH", `Account ${connectionId.slice(0,8)} error cleared`);
log.info("AUTH", `Account ${connectionId.slice(0, 8)} error cleared`);
}
/**
 * Extract the client API key from an incoming request.
 *
 * Supports two conventions: an OpenAI-style `Authorization: Bearer <key>`
 * header, and an Anthropic-style `x-api-key` header (checked in that order).
 *
 * @param {Request} request - Incoming request with a Headers object.
 * @returns {string|null} The API key, or null when neither header is present.
 */
export function extractApiKey(request) {
  const bearer = request.headers.get("Authorization");
  if (bearer?.startsWith("Bearer ")) {
    return bearer.slice("Bearer ".length);
  }
  // Fall back to the Anthropic-compatible header.
  return request.headers.get("x-api-key") || null;
}

View File

@@ -40,6 +40,15 @@ export async function getModelInfo(modelStr) {
};
}
// Check if this is a combo name before resolving as alias
// This prevents combo names from being incorrectly routed to providers
const combo = await getComboByName(parsed.model);
if (combo) {
// Return null provider to signal this should be handled as combo
// The caller (handleChat) will detect this and handle it as combo
return { provider: null, model: parsed.model };
}
return getModelInfoCore(modelStr, getModelAliases);
}
@@ -50,7 +59,7 @@ export async function getModelInfo(modelStr) {
export async function getComboModels(modelStr) {
// Only check if it's not in provider/model format
if (modelStr.includes("/")) return null;
const combo = await getComboByName(modelStr);
if (combo && combo.models && combo.models.length > 0) {
return combo.models;