From e8aa5e2222483f0af627bbe1017747a1cdbbe1da Mon Sep 17 00:00:00 2001 From: decolua Date: Wed, 29 Apr 2026 09:34:24 +0700 Subject: [PATCH] Fix: Add reasoning_content placeholder for DeepSeek thinking models --- open-sse/executors/default.js | 5 +++ open-sse/executors/opencode-go.js | 14 ++------ open-sse/utils/reasoningContentInjector.js | 37 ++++++++++++++++++++++ 3 files changed, 44 insertions(+), 12 deletions(-) create mode 100644 open-sse/utils/reasoningContentInjector.js diff --git a/open-sse/executors/default.js b/open-sse/executors/default.js index 1db029a3..5e29ef76 100644 --- a/open-sse/executors/default.js +++ b/open-sse/executors/default.js @@ -4,12 +4,17 @@ import { OAUTH_ENDPOINTS, buildKimiHeaders } from "../config/appConstants.js"; import { buildClineHeaders } from "../../src/shared/utils/clineAuth.js"; import { getCachedClaudeHeaders } from "../utils/claudeHeaderCache.js"; import { proxyAwareFetch } from "../utils/proxyFetch.js"; +import { injectReasoningContent } from "../utils/reasoningContentInjector.js"; export class DefaultExecutor extends BaseExecutor { constructor(provider) { super(provider, PROVIDERS[provider] || PROVIDERS.openai); } + transformRequest(model, body) { + return injectReasoningContent({ provider: this.provider, model, body }); + } + buildUrl(model, stream, urlIndex = 0, credentials = null) { if (this.provider?.startsWith?.("openai-compatible-")) { const baseUrl = credentials?.providerSpecificData?.baseUrl || "https://api.openai.com/v1"; diff --git a/open-sse/executors/opencode-go.js b/open-sse/executors/opencode-go.js index c75a4724..39e9e0aa 100644 --- a/open-sse/executors/opencode-go.js +++ b/open-sse/executors/opencode-go.js @@ -1,15 +1,12 @@ import { BaseExecutor } from "./base.js"; import { PROVIDERS } from "../config/providers.js"; +import { injectReasoningContent } from "../utils/reasoningContentInjector.js"; // Models that use /zen/go/v1/messages (Anthropic/Claude format + x-api-key auth) const CLAUDE_FORMAT_MODELS = 
new Set(["minimax-m2.5", "minimax-m2.7"]); const BASE = "https://opencode.ai/zen/go/v1"; -// Kimi (Moonshot) requires reasoning_content on assistant tool_call messages when thinking is on. -// OpenAI-format clients don't send it -> upstream 400. Inject a non-empty placeholder. -const KIMI_REASONING_PLACEHOLDER = " "; - export class OpenCodeGoExecutor extends BaseExecutor { constructor() { super("opencode-go", PROVIDERS["opencode-go"]); @@ -39,13 +36,6 @@ export class OpenCodeGoExecutor extends BaseExecutor { } transformRequest(model, body) { - if (!model?.startsWith?.("kimi-") || !body?.messages) return body; - const messages = body.messages.map(m => { - if (m?.role === "assistant" && Array.isArray(m.tool_calls) && !("reasoning_content" in m)) { - return { ...m, reasoning_content: KIMI_REASONING_PLACEHOLDER }; - } - return m; - }); - return { ...body, messages }; + return injectReasoningContent({ provider: this.provider, model, body }); } } diff --git a/open-sse/utils/reasoningContentInjector.js b/open-sse/utils/reasoningContentInjector.js new file mode 100644 index 00000000..80cd9315 --- /dev/null +++ b/open-sse/utils/reasoningContentInjector.js @@ -0,0 +1,37 @@ +// Some thinking-mode providers (DeepSeek, Kimi, ...) require reasoning_content +// to be echoed back on assistant messages. Clients in OpenAI format don't send it, +// so we inject a non-empty placeholder to satisfy upstream validation. 
+ +const PLACEHOLDER = " "; + +// Provider-level rules: keyed by executor.provider +const PROVIDER_RULES = { + deepseek: { scope: "all" } +}; + +// Model-level rules: matched by predicate against model id +const MODEL_RULES = [ + { match: m => m?.startsWith?.("kimi-"), scope: "toolCalls" }, + { match: m => m?.startsWith?.("deepseek-"), scope: "all" } +]; + +function shouldInject(message, scope) { + if (message?.role !== "assistant" || "reasoning_content" in message) return false; + if (scope === "toolCalls") return Array.isArray(message.tool_calls); + return true; +} + +function applyRule(body, rule) { + if (!rule || !body?.messages) return body; + const messages = body.messages.map(m => + shouldInject(m, rule.scope) ? { ...m, reasoning_content: PLACEHOLDER } : m + ); + return { ...body, messages }; +} + +export function injectReasoningContent({ provider, model, body }) { + const providerRule = PROVIDER_RULES[provider]; + const modelRule = MODEL_RULES.find(r => r.match(model)); + const rule = providerRule || modelRule; + return applyRule(body, rule); +}