From 3d4dbdc0e7b4d98f470a5d2efef95214bf38a636 Mon Sep 17 00:00:00 2001
From: decolua
Date: Mon, 23 Mar 2026 09:29:31 +0700
Subject: [PATCH] fix(chat): pick last non-empty message for Codex Responses SSE

Root cause: Codex/OpenAI Responses streams multiple alternating reasoning
and message output items. The first message block often has empty
output_text; the visible answer lives in a later message. Previous code
used output.find() which always picked the first (empty) message block.

Fix: walk message items from end and use the last message whose extracted
text is non-empty; fall back to final message if all are empty.

Note: Removed debug logging code from original PR #383 to keep
implementation clean.

Co-authored-by: lokinh
Made-with: Cursor
---
 .../handlers/chatCore/sseToJsonHandler.js | 28 +++++++++++++++++--
 1 file changed, 26 insertions(+), 2 deletions(-)

diff --git a/open-sse/handlers/chatCore/sseToJsonHandler.js b/open-sse/handlers/chatCore/sseToJsonHandler.js
index 8df3996c..01b40b04 100644
--- a/open-sse/handlers/chatCore/sseToJsonHandler.js
+++ b/open-sse/handlers/chatCore/sseToJsonHandler.js
@@ -5,6 +5,31 @@
 import { FORMATS } from "../../translator/formats.js";
 import { buildRequestDetail, extractRequestConfig, saveUsageStats } from "./requestDetail.js";
 import { saveRequestDetail, appendRequestLog } from "@/lib/usageDb.js";
+function textFromResponsesMessageItem(item) {
+  if (!item?.content || !Array.isArray(item.content)) return "";
+  const byType = item.content.find((c) => c.type === "output_text");
+  if (typeof byType?.text === "string") return byType.text;
+  const anyText = item.content.find((c) => typeof c.text === "string");
+  if (typeof anyText?.text === "string") return anyText.text;
+  return "";
+}
+
+/**
+ * Codex / Responses API may emit many alternating reasoning + message items.
+ * Early message blocks often have empty output_text; the user-visible answer is usually in the last non-empty message.
+ */
+function pickAssistantMessageForChatCompletion(output) {
+  if (!Array.isArray(output)) return { msgItem: null, textContent: null };
+  const messages = output.filter((item) => item?.type === "message");
+  if (messages.length === 0) return { msgItem: null, textContent: null };
+  for (let i = messages.length - 1; i >= 0; i--) {
+    const text = textFromResponsesMessageItem(messages[i]);
+    if (text.length > 0) return { msgItem: messages[i], textContent: text };
+  }
+  const last = messages[messages.length - 1];
+  return { msgItem: last, textContent: textFromResponsesMessageItem(last) };
+}
+
 /**
  * Parse OpenAI-style SSE text into a single chat completion JSON.
  * Used when provider forces streaming but client wants non-streaming.
@@ -79,8 +104,7 @@ export async function handleForcedSSEToJson({ providerResponse, sourceFormat, pr
   appendLog({ tokens: usage, status: "200 OK" });
   saveUsageStats({ provider, model, tokens: usage, connectionId, apiKey, endpoint: clientRawRequest?.endpoint });
 
-  const msgItem = jsonResponse.output?.find(item => item.type === "message");
-  const textContent = msgItem?.content?.find(c => c.type === "output_text")?.text || msgItem?.content?.[0]?.text || null;
+  const { msgItem, textContent } = pickAssistantMessageForChatCompletion(jsonResponse.output);
 
   const totalLatency = Date.now() - requestStartTime;
   saveRequestDetail(buildRequestDetail({