fix: update Qwen OAuth URLs from chat.qwen.ai to qwen.ai (closes #572) (#683)

* fix: make version update banner clickable to copy install command (closes #598)

* fix: resolve ollama-local baseUrl from providerSpecificData.baseUrl for remote Ollama hosts (closes #578)

* fix: add Ollama Cloud to usage/quota tracking (closes #681)

* fix: update Qwen OAuth URLs from chat.qwen.ai to qwen.ai per issue #572
This commit is contained in:
Anurag Saxena
2026-04-21 23:32:28 -04:00
committed by GitHub
parent 37f7e97348
commit 94ab0d715d
5 changed files with 67 additions and 6 deletions

View File

@@ -58,6 +58,8 @@ export async function getUsageForProvider(connection) {
return await getQwenUsage(accessToken, providerSpecificData);
case "iflow":
return await getIflowUsage(accessToken);
case "ollama":
return await getOllamaUsage(accessToken);
default:
return { message: `Usage API not implemented for ${provider}` };
}
@@ -719,3 +721,25 @@ async function getIflowUsage(accessToken) {
return { message: "Unable to fetch iFlow usage." };
}
}
/**
 * Ollama Cloud Usage
 * Ollama Cloud uses an API key from ollama.com/settings/keys
 * and has no public usage API — free tier has light usage limits (resets every 5h & 7d).
 * This returns an informational message with the plan details.
 *
 * @param {string} accessToken - Unused; kept so the signature matches the other
 *   getXUsage helpers dispatched from getUsageForProvider.
 * @param {{plan?: string}} [providerSpecificData] - Optional per-connection data;
 *   only `plan` is read. NOTE(review): the visible call site passes only
 *   accessToken, so the plan currently always defaults — consider forwarding
 *   providerSpecificData there.
 * @returns {Promise<{plan: string, message: string, quotas: Array}>}
 */
async function getOllamaUsage(accessToken, providerSpecificData) {
  // Ollama Cloud does not expose a public quota/usage API.
  // The provider is configured as noAuth with a notice explaining limits.
  // We return a graceful message so the UI shows a friendly state instead of an error.
  // No try/catch: nothing here can throw, and a dead catch would silently
  // swallow any future errors introduced into this path.
  // `??` (not `||`) so only a missing/null plan falls back to "Free".
  const plan = providerSpecificData?.plan ?? "Free";
  return {
    plan,
    message: "Ollama Cloud uses a free tier with light usage limits (resets every 5h & 7d). For detailed usage tracking, visit ollama.com/settings/keys.",
    quotas: [],
  };
}

View File

@@ -163,7 +163,7 @@ const PROVIDER_MODELS_CONFIG = {
siliconflow: createOpenAIModelsConfig("https://api.siliconflow.cn/v1/models"),
hyperbolic: createOpenAIModelsConfig("https://api.hyperbolic.xyz/v1/models"),
ollama: createOpenAIModelsConfig("https://ollama.com/api/tags"),
"ollama-local": createOpenAIModelsConfig("http://localhost:11434/api/tags"),
// ollama-local: url resolved dynamically below via providerSpecificData.baseUrl
nanobanana: createOpenAIModelsConfig("https://api.nanobananaapi.ai/v1/models"),
chutes: createOpenAIModelsConfig("https://llm.chutes.ai/v1/models"),
nvidia: createOpenAIModelsConfig("https://integrate.api.nvidia.com/v1/models"),
@@ -380,6 +380,34 @@ export async function GET(request, { params }) {
});
}
// Handle ollama-local: resolve URL from providerSpecificData.baseUrl if provided,
// otherwise fall back to default localhost address.
if (connection.provider === "ollama-local") {
const baseUrl = connection.providerSpecificData?.baseUrl;
const url = baseUrl
? `${baseUrl.replace(/\/$/, "")}/api/tags`
: "http://localhost:11434/api/tags";
const response = await fetch(url, {
method: "GET",
headers: { "Content-Type": "application/json" },
});
if (!response.ok) {
const errorText = await response.text();
console.log(`Error fetching models from ollama-local:`, errorText);
return NextResponse.json(
{ error: `Failed to fetch models: ${response.status}` },
{ status: response.status }
);
}
const data = await response.json();
const models = parseOpenAIStyleModels(data);
return NextResponse.json({
provider: connection.provider,
connectionId: connection.id,
models,
});
}
const config = PROVIDER_MODELS_CONFIG[connection.provider];
if (!config) {
return NextResponse.json(

View File

@@ -57,8 +57,8 @@ export const GEMINI_CONFIG = {
// Qwen OAuth Configuration (Device Code Flow with PKCE)
// OAuth endpoints live on qwen.ai (updated from chat.qwen.ai per issue #572);
// the stale chat.qwen.ai duplicate keys were removed — in a JS object literal
// later duplicate keys silently shadow earlier ones, so runtime values are unchanged.
export const QWEN_CONFIG = {
  clientId: "f0304373b74a44d2b584a3fb70ca9e56",
  deviceCodeUrl: "https://qwen.ai/api/v1/oauth2/device/code",
  tokenUrl: "https://qwen.ai/api/v1/oauth2/token",
  scope: "openid profile email model.completion",
  codeChallengeMethod: "S256",
};

View File

@@ -7,6 +7,7 @@ import { usePathname } from "next/navigation";
import { cn } from "@/shared/utils/cn";
import { APP_CONFIG } from "@/shared/constants/config";
import { MEDIA_PROVIDER_KINDS } from "@/shared/constants/providers";
import { useCopyToClipboard } from "@/shared/hooks/useCopyToClipboard";
import Button from "./Button";
import { ConfirmModal } from "./Modal";
@@ -41,6 +42,9 @@ export default function Sidebar({ onClose }) {
const [isDisconnected, setIsDisconnected] = useState(false);
const [updateInfo, setUpdateInfo] = useState(null);
const [enableTranslator, setEnableTranslator] = useState(false);
const { copied, copy } = useCopyToClipboard(2000);
const INSTALL_CMD = "npm install -g 9router@latest";
useEffect(() => {
fetch("/api/settings")
@@ -100,14 +104,18 @@ export default function Sidebar({ onClose }) {
</div>
</Link>
{updateInfo && (
<div className="flex flex-col gap-0.5">
<button
onClick={() => copy(INSTALL_CMD)}
title="Click to copy install command"
className="flex flex-col gap-0.5 text-left hover:opacity-80 transition-opacity cursor-pointer rounded p-1 -m-1"
>
<span className="text-xs font-semibold text-green-600 dark:text-amber-500">
New version available: v{updateInfo.latestVersion}
</span>
<code className="text-[10px] text-green-600/80 dark:text-amber-400/70 font-mono select-all">
npm install -g 9router@latest
{copied ? "✓ copied!" : INSTALL_CMD}
</code>
</div>
</button>
)}
</div>

View File

@@ -186,4 +186,5 @@ export const USAGE_SUPPORTED_PROVIDERS = [
"github",
"codex",
"kimi-coding",
"ollama",
];