mirror of
https://github.com/decolua/9router.git
synced 2026-05-08 12:01:28 +00:00
Fix: Add reasoning_content placeholder for DeepSeek thinking models
This commit is contained in:
@@ -4,12 +4,17 @@ import { OAUTH_ENDPOINTS, buildKimiHeaders } from "../config/appConstants.js";
|
||||
import { buildClineHeaders } from "../../src/shared/utils/clineAuth.js";
|
||||
import { getCachedClaudeHeaders } from "../utils/claudeHeaderCache.js";
|
||||
import { proxyAwareFetch } from "../utils/proxyFetch.js";
|
||||
import { injectReasoningContent } from "../utils/reasoningContentInjector.js";
|
||||
|
||||
export class DefaultExecutor extends BaseExecutor {
|
||||
constructor(provider) {
|
||||
super(provider, PROVIDERS[provider] || PROVIDERS.openai);
|
||||
}
|
||||
|
||||
transformRequest(model, body) {
|
||||
return injectReasoningContent({ provider: this.provider, model, body });
|
||||
}
|
||||
|
||||
buildUrl(model, stream, urlIndex = 0, credentials = null) {
|
||||
if (this.provider?.startsWith?.("openai-compatible-")) {
|
||||
const baseUrl = credentials?.providerSpecificData?.baseUrl || "https://api.openai.com/v1";
|
||||
|
||||
@@ -1,15 +1,12 @@
|
||||
import { BaseExecutor } from "./base.js";
|
||||
import { PROVIDERS } from "../config/providers.js";
|
||||
import { injectReasoningContent } from "../utils/reasoningContentInjector.js";
|
||||
|
||||
// Models that use /zen/go/v1/messages (Anthropic/Claude format + x-api-key auth)
|
||||
const CLAUDE_FORMAT_MODELS = new Set(["minimax-m2.5", "minimax-m2.7"]);
|
||||
|
||||
const BASE = "https://opencode.ai/zen/go/v1";
|
||||
|
||||
// Kimi (Moonshot) requires reasoning_content on assistant tool_call messages when thinking is on.
|
||||
// OpenAI-format clients don't send it -> upstream 400. Inject a non-empty placeholder.
|
||||
const KIMI_REASONING_PLACEHOLDER = " ";
|
||||
|
||||
export class OpenCodeGoExecutor extends BaseExecutor {
|
||||
constructor() {
|
||||
super("opencode-go", PROVIDERS["opencode-go"]);
|
||||
@@ -39,13 +36,6 @@ export class OpenCodeGoExecutor extends BaseExecutor {
|
||||
}
|
||||
|
||||
transformRequest(model, body) {
|
||||
if (!model?.startsWith?.("kimi-") || !body?.messages) return body;
|
||||
const messages = body.messages.map(m => {
|
||||
if (m?.role === "assistant" && Array.isArray(m.tool_calls) && !("reasoning_content" in m)) {
|
||||
return { ...m, reasoning_content: KIMI_REASONING_PLACEHOLDER };
|
||||
}
|
||||
return m;
|
||||
});
|
||||
return { ...body, messages };
|
||||
return injectReasoningContent({ provider: this.provider, model, body });
|
||||
}
|
||||
}
|
||||
|
||||
37
open-sse/utils/reasoningContentInjector.js
Normal file
37
open-sse/utils/reasoningContentInjector.js
Normal file
@@ -0,0 +1,37 @@
|
||||
// Some thinking-mode providers (DeepSeek, Kimi, ...) require reasoning_content
// to be echoed back on assistant messages. Clients in OpenAI format don't send it,
// so we inject a non-empty placeholder to satisfy upstream validation.

// Single space: non-empty so upstream validation passes, but visually inert.
const PLACEHOLDER = " ";

// Provider-level rules: keyed by executor.provider.
// scope "all"       -> inject on every assistant message missing reasoning_content
// scope "toolCalls" -> inject only on assistant messages carrying tool_calls
// Frozen so a rule table can't be mutated accidentally at runtime.
const PROVIDER_RULES = Object.freeze({
  deepseek: Object.freeze({ scope: "all" }),
});

// Model-level rules: matched by predicate against the model id; consumers
// look these up with MODEL_RULES.find(...), so the first match wins.
const MODEL_RULES = Object.freeze([
  Object.freeze({ match: (m) => m?.startsWith?.("kimi-"), scope: "toolCalls" }),
  Object.freeze({ match: (m) => m?.startsWith?.("deepseek-"), scope: "all" }),
]);
|
||||
|
||||
// Decide whether a single chat message needs the reasoning_content placeholder.
// Only assistant messages that do not already carry their own reasoning_content
// qualify; scope "toolCalls" additionally requires a tool_calls array.
// Uses Object.hasOwn rather than `in` so an inherited/prototype property can
// never suppress injection — messages are JSON-derived plain objects.
function shouldInject(message, scope) {
  if (message?.role !== "assistant" || Object.hasOwn(message, "reasoning_content")) {
    return false;
  }
  if (scope === "toolCalls") return Array.isArray(message.tool_calls);
  return true;
}
|
||||
|
||||
// Apply one injection rule to a request body, returning a new body whose
// qualifying assistant messages gain a placeholder reasoning_content.
// Non-destructive: the input body and its messages are never mutated.
// Pass-through (same reference) when there is no rule or when messages is
// missing or not an array — a malformed client body must not make the
// proxy throw here; let the upstream reject it instead.
function applyRule(body, rule) {
  if (!rule || !Array.isArray(body?.messages)) return body;
  const messages = body.messages.map((message) =>
    shouldInject(message, rule.scope)
      ? { ...message, reasoning_content: PLACEHOLDER }
      : message
  );
  return { ...body, messages };
}
|
||||
|
||||
// Entry point: pick the applicable rule for this request and apply it.
// A provider-level rule always takes precedence over a model-name rule.
export function injectReasoningContent({ provider, model, body }) {
  let rule = PROVIDER_RULES[provider];
  if (!rule) {
    // No provider rule — fall back to the first model-id predicate that matches.
    rule = MODEL_RULES.find((candidate) => candidate.match(model));
  }
  return applyRule(body, rule);
}
|
||||
Reference in New Issue
Block a user