/**
 * Translator: OpenAI Chat Completions → OpenAI Responses API (response)
 * Converts streaming chunks from Chat Completions to Responses API events
 */
import { register } from "../index.js";
import { FORMATS } from "../formats.js";
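
// NOTE (assumption): the mutable `state` object is created and owned by the caller
// (the streaming pipeline), not by this module. Based on the fields read and written
// below, a fresh per-stream state would look roughly like:
//
//   const state = {
//     seq: 0, started: false, completedSent: false,
//     responseId: "resp_local", created: Math.floor(Date.now() / 1000),
//     inThinking: false,
//     reasoningId: null, reasoningIndex: 0, reasoningBuf: "", reasoningDone: false,
//     msgItemAdded: {}, msgContentAdded: {}, msgTextBuf: {}, msgItemDone: {},
//     funcCallIds: {}, funcNames: {}, funcArgsBuf: {}, funcItemDone: {}, funcArgsDone: {}
//   };
//
// This is a sketch, not the authoritative initializer; the reverse translator further
// down uses its own fields (chatId, model, toolCallIndex, currentToolCallId, finishReasonSent).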

/**
 * Translate OpenAI chunk to Responses API events
 * @returns {Array} Array of events with { event, data } structure
 */
function openaiToOpenAIResponsesResponse(chunk, state) {
  if (!chunk) {
    return flushEvents(state);
  }

  if (!chunk.choices?.length) return [];

  const events = [];
  const nextSeq = () => ++state.seq;

  const emit = (eventType, data) => {
    data.sequence_number = nextSeq();
    events.push({ event: eventType, data });
  };

  const choice = chunk.choices[0];
  const idx = choice.index || 0;
  const delta = choice.delta || {};

  // Emit initial events
  if (!state.started) {
    state.started = true;
    state.responseId = chunk.id ? `resp_${chunk.id}` : state.responseId;

    emit("response.created", {
      type: "response.created",
      response: {
        id: state.responseId,
        object: "response",
        created_at: state.created,
        status: "in_progress",
        background: false,
        error: null,
        output: []
      }
    });

    emit("response.in_progress", {
      type: "response.in_progress",
      response: {
        id: state.responseId,
        object: "response",
        created_at: state.created,
        status: "in_progress"
      }
    });
  }

  // Handle reasoning_content
  if (delta.reasoning_content) {
    startReasoning(state, emit, idx);
    emitReasoningDelta(state, emit, delta.reasoning_content);
  }

  // Handle text content
  if (delta.content) {
    let content = delta.content;

    if (content.includes("<think>")) {
      state.inThinking = true;
      content = content.replace("<think>", "");
      startReasoning(state, emit, idx);
    }

    if (content.includes("</think>")) {
      const parts = content.split("</think>");
      const thinkPart = parts[0];
      const textPart = parts.slice(1).join("</think>");
      if (thinkPart) emitReasoningDelta(state, emit, thinkPart);
      closeReasoning(state, emit);
      state.inThinking = false;
      content = textPart;
    }

    if (state.inThinking && content) {
      emitReasoningDelta(state, emit, content);
      return events;
    }

    if (content) {
      emitTextContent(state, emit, idx, content);
    }
  }

  // Handle tool_calls
  if (delta.tool_calls) {
    closeMessage(state, emit, idx);
    for (const tc of delta.tool_calls) {
      emitToolCall(state, emit, tc);
    }
  }

  // Handle finish_reason
  if (choice.finish_reason) {
    for (const i in state.msgItemAdded) closeMessage(state, emit, i);
    closeReasoning(state, emit);
    for (const i in state.funcCallIds) closeToolCall(state, emit, i);
    sendCompleted(state, emit);
  }

  return events;
}
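
// Example (illustrative, assuming a fresh state with seq = 0 and started = false):
// the first Chat Completions chunk { id: "abc", choices: [{ index: 0, delta: { content: "Hi" } }] }
// yields five events, in order:
//   1. response.created            (response resp_abc, status "in_progress")
//   2. response.in_progress
//   3. response.output_item.added  (message item msg_resp_abc_0)
//   4. response.content_part.added (empty output_text part)
//   5. response.output_text.delta  (delta: "Hi")
// Later content chunks add only output_text.delta events until finish_reason arrives.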

// Helper functions
function startReasoning(state, emit, idx) {
  if (!state.reasoningId) {
    state.reasoningId = `rs_${state.responseId}_${idx}`;
    state.reasoningIndex = idx;

    emit("response.output_item.added", {
      type: "response.output_item.added",
      output_index: idx,
      item: { id: state.reasoningId, type: "reasoning", summary: [] }
    });

    emit("response.reasoning_summary_part.added", {
      type: "response.reasoning_summary_part.added",
      item_id: state.reasoningId,
      output_index: idx,
      summary_index: 0,
      part: { type: "summary_text", text: "" }
    });
    state.reasoningPartAdded = true;
  }
}

function emitReasoningDelta(state, emit, text) {
  if (!text) return;
  state.reasoningBuf += text;
  emit("response.reasoning_summary_text.delta", {
    type: "response.reasoning_summary_text.delta",
    item_id: state.reasoningId,
    output_index: state.reasoningIndex,
    summary_index: 0,
    delta: text
  });
}

function closeReasoning(state, emit) {
  if (state.reasoningId && !state.reasoningDone) {
    state.reasoningDone = true;

    emit("response.reasoning_summary_text.done", {
      type: "response.reasoning_summary_text.done",
      item_id: state.reasoningId,
      output_index: state.reasoningIndex,
      summary_index: 0,
      text: state.reasoningBuf
    });

    emit("response.reasoning_summary_part.done", {
      type: "response.reasoning_summary_part.done",
      item_id: state.reasoningId,
      output_index: state.reasoningIndex,
      summary_index: 0,
      part: { type: "summary_text", text: state.reasoningBuf }
    });

    emit("response.output_item.done", {
      type: "response.output_item.done",
      output_index: state.reasoningIndex,
      item: {
        id: state.reasoningId,
        type: "reasoning",
        summary: [{ type: "summary_text", text: state.reasoningBuf }]
      }
    });
  }
}
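
// Reasoning lifecycle note: reasoning text reaches this translator either as
// delta.reasoning_content or inline between <think> and </think> markers in delta.content
// (see openaiToOpenAIResponsesResponse above). startReasoning opens the reasoning output
// item and its summary part exactly once, emitReasoningDelta streams
// response.reasoning_summary_text.delta while buffering the full text in state.reasoningBuf,
// and closeReasoning emits the matching *.done events exactly once.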

function emitTextContent(state, emit, idx, content) {
  if (!state.msgItemAdded[idx]) {
    state.msgItemAdded[idx] = true;
    const msgId = `msg_${state.responseId}_${idx}`;

    emit("response.output_item.added", {
      type: "response.output_item.added",
      output_index: idx,
      item: { id: msgId, type: "message", content: [], role: "assistant" }
    });
  }

  if (!state.msgContentAdded[idx]) {
    state.msgContentAdded[idx] = true;

    emit("response.content_part.added", {
      type: "response.content_part.added",
      item_id: `msg_${state.responseId}_${idx}`,
      output_index: idx,
      content_index: 0,
      part: { type: "output_text", annotations: [], logprobs: [], text: "" }
    });
  }

  emit("response.output_text.delta", {
    type: "response.output_text.delta",
    item_id: `msg_${state.responseId}_${idx}`,
    output_index: idx,
    content_index: 0,
    delta: content,
    logprobs: []
  });

  if (!state.msgTextBuf[idx]) state.msgTextBuf[idx] = "";
  state.msgTextBuf[idx] += content;
}

function closeMessage(state, emit, idx) {
  if (state.msgItemAdded[idx] && !state.msgItemDone[idx]) {
    state.msgItemDone[idx] = true;
    const fullText = state.msgTextBuf[idx] || "";
    const msgId = `msg_${state.responseId}_${idx}`;

    emit("response.output_text.done", {
      type: "response.output_text.done",
      item_id: msgId,
      output_index: parseInt(idx),
      content_index: 0,
      text: fullText,
      logprobs: []
    });

    emit("response.content_part.done", {
      type: "response.content_part.done",
      item_id: msgId,
      output_index: parseInt(idx),
      content_index: 0,
      part: { type: "output_text", annotations: [], logprobs: [], text: fullText }
    });

    emit("response.output_item.done", {
      type: "response.output_item.done",
      output_index: parseInt(idx),
      item: {
        id: msgId,
        type: "message",
        content: [{ type: "output_text", annotations: [], logprobs: [], text: fullText }],
        role: "assistant"
      }
    });
  }
}

function emitToolCall(state, emit, tc) {
  const tcIdx = tc.index ?? 0;
  const newCallId = tc.id;
  const funcName = tc.function?.name;

  if (funcName) state.funcNames[tcIdx] = funcName;

  if (!state.funcCallIds[tcIdx] && newCallId) {
    state.funcCallIds[tcIdx] = newCallId;

    emit("response.output_item.added", {
      type: "response.output_item.added",
      output_index: tcIdx,
      item: {
        id: `fc_${newCallId}`,
        type: "function_call",
        arguments: "",
        call_id: newCallId,
        name: state.funcNames[tcIdx] || ""
      }
    });
  }

  if (!state.funcArgsBuf[tcIdx]) state.funcArgsBuf[tcIdx] = "";

  if (tc.function?.arguments) {
    const refCallId = state.funcCallIds[tcIdx] || newCallId;
    if (refCallId) {
      emit("response.function_call_arguments.delta", {
        type: "response.function_call_arguments.delta",
        item_id: `fc_${refCallId}`,
        output_index: tcIdx,
        delta: tc.function.arguments
      });
    }
    state.funcArgsBuf[tcIdx] += tc.function.arguments;
  }
}

function closeToolCall(state, emit, idx) {
  const callId = state.funcCallIds[idx];
  if (callId && !state.funcItemDone[idx]) {
    const args = state.funcArgsBuf[idx] || "{}";

    emit("response.function_call_arguments.done", {
      type: "response.function_call_arguments.done",
      item_id: `fc_${callId}`,
      output_index: parseInt(idx),
      arguments: args
    });

    emit("response.output_item.done", {
      type: "response.output_item.done",
      output_index: parseInt(idx),
      item: {
        id: `fc_${callId}`,
        type: "function_call",
        arguments: args,
        call_id: callId,
        name: state.funcNames[idx] || ""
      }
    });

    state.funcItemDone[idx] = true;
    state.funcArgsDone[idx] = true;
  }
}
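
// Example (illustrative): Chat Completions streams a tool call across chunks, e.g.
//   { delta: { tool_calls: [{ index: 0, id: "call_1", function: { name: "get_weather", arguments: "" } }] } }
//   { delta: { tool_calls: [{ index: 0, function: { arguments: "{\"city\":\"Hanoi\"}" } }] } }
// emitToolCall opens a function_call output item (fc_call_1) when the id first appears and
// streams response.function_call_arguments.delta for each arguments fragment, while
// closeToolCall emits function_call_arguments.done and output_item.done once the upstream
// chunk carries a finish_reason (or the stream is flushed).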

function sendCompleted(state, emit) {
  if (!state.completedSent) {
    state.completedSent = true;
    emit("response.completed", {
      type: "response.completed",
      response: {
        id: state.responseId,
        object: "response",
        created_at: state.created,
        status: "completed",
        background: false,
        error: null
      }
    });
  }
}

function flushEvents(state) {
  if (state.completedSent) return [];

  const events = [];
  const nextSeq = () => ++state.seq;
  const emit = (eventType, data) => {
    data.sequence_number = nextSeq();
    events.push({ event: eventType, data });
  };

  for (const i in state.msgItemAdded) closeMessage(state, emit, i);
  closeReasoning(state, emit);
  for (const i in state.funcCallIds) closeToolCall(state, emit, i);
  sendCompleted(state, emit);

  return events;
}
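
// flushEvents is the fallback path: openaiToOpenAIResponsesResponse calls it when it is
// handed a null/undefined chunk (i.e. the upstream stream ended without a finish_reason),
// so any open message, reasoning, or tool-call items are closed and response.completed is
// still sent exactly once.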

/**
 * Translate OpenAI Responses API chunk to OpenAI Chat Completions format
 * This is for when Codex returns data and we need to send it to an OpenAI-compatible client
 */
function openaiResponsesToOpenAIResponse(chunk, state) {
  if (!chunk) {
    // Flush: send final chunk with finish_reason
    if (!state.finishReasonSent && state.started) {
      state.finishReasonSent = true;
      return {
        id: state.chatId || `chatcmpl-${Date.now()}`,
        object: "chat.completion.chunk",
        created: state.created || Math.floor(Date.now() / 1000),
        model: state.model || "gpt-4",
        choices: [{
          index: 0,
          delta: {},
          finish_reason: "stop"
        }]
      };
    }
    return null;
  }

  // Handle different event types from Responses API
  const eventType = chunk.type || chunk.event;
  const data = chunk.data || chunk;

  // Initialize state
  if (!state.started) {
    state.started = true;
    state.chatId = `chatcmpl-${Date.now()}`;
    state.created = Math.floor(Date.now() / 1000);
    state.toolCallIndex = 0;
    state.currentToolCallId = null;
  }

  // Text content delta
  if (eventType === "response.output_text.delta") {
    const delta = data.delta || "";
    if (!delta) return null;

    return {
      id: state.chatId,
      object: "chat.completion.chunk",
      created: state.created,
      model: state.model || "gpt-4",
      choices: [{
        index: 0,
        delta: { content: delta },
        finish_reason: null
      }]
    };
  }

  // Text content done (ignore, we handle via delta)
  if (eventType === "response.output_text.done") {
    return null;
  }

  // Function call started
  if (eventType === "response.output_item.added" && data.item?.type === "function_call") {
    const item = data.item;
    state.currentToolCallId = item.call_id || `call_${Date.now()}`;

    return {
      id: state.chatId,
      object: "chat.completion.chunk",
      created: state.created,
      model: state.model || "gpt-4",
      choices: [{
        index: 0,
        delta: {
          tool_calls: [{
            index: state.toolCallIndex,
            id: state.currentToolCallId,
            type: "function",
            function: {
              name: item.name || "",
              arguments: ""
            }
          }]
        },
        finish_reason: null
      }]
    };
  }

  // Function call arguments delta
  if (eventType === "response.function_call_arguments.delta") {
    const argsDelta = data.delta || "";
    if (!argsDelta) return null;

    return {
      id: state.chatId,
      object: "chat.completion.chunk",
      created: state.created,
      model: state.model || "gpt-4",
      choices: [{
        index: 0,
        delta: {
          tool_calls: [{
            index: state.toolCallIndex,
            function: { arguments: argsDelta }
          }]
        },
        finish_reason: null
      }]
    };
  }

  // Function call done
  if (eventType === "response.output_item.done" && data.item?.type === "function_call") {
    state.toolCallIndex++;
    return null;
  }

  // Response completed
  if (eventType === "response.completed") {
    if (!state.finishReasonSent) {
      state.finishReasonSent = true;
      return {
        id: state.chatId,
        object: "chat.completion.chunk",
        created: state.created,
        model: state.model || "gpt-4",
        choices: [{
          index: 0,
          delta: {},
          finish_reason: "stop"
        }]
      };
    }
    return null;
  }

  // Reasoning events (convert to content or skip)
  if (eventType === "response.reasoning_summary_text.delta") {
    // Optionally include reasoning as content, or skip
    return null;
  }

  // Ignore other events
  return null;
}
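
// Example (illustrative): a Responses API event
//   { type: "response.output_text.delta", delta: "Hello" }
// becomes the Chat Completions chunk
//   { id: state.chatId, object: "chat.completion.chunk", created: state.created,
//     model: state.model || "gpt-4",
//     choices: [{ index: 0, delta: { content: "Hello" }, finish_reason: null }] }
// Tool-call items map to delta.tool_calls entries keyed by state.toolCallIndex. Note that
// both the flush path and the response.completed handler currently report
// finish_reason: "stop", including for turns that streamed tool calls.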

// Register both directions
register(FORMATS.OPENAI, FORMATS.OPENAI_RESPONSES, null, openaiToOpenAIResponsesResponse);
register(FORMATS.OPENAI_RESPONSES, FORMATS.OPENAI, null, openaiResponsesToOpenAIResponse);
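
// Usage sketch (hypothetical; the real driver lives outside this file): whatever code in
// ../index.js resolves the registered response translator for a (source → target) format
// pair would call it once per parsed streaming chunk and once more with null to flush,
// collecting the returned events, e.g.:
//
//   const events = [];
//   for (const chunk of parsedChunks) {             // parsedChunks: placeholder name
//     events.push(...openaiToOpenAIResponsesResponse(chunk, state));
//   }
//   events.push(...openaiToOpenAIResponsesResponse(null, state)); // flush at stream end
//
// Here `state` must be the per-stream object described near the top of this file.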