Enhance chat handling and introduce Caveman feature

- Refactored handleChatCore to include Caveman functionality, which injects terse-style system prompts to reduce output token usage.
- Updated APIPageClient to manage Caveman settings, including enabling/disabling and selecting compression levels.
- Adjusted AntigravityExecutor to consolidate function declarations for compatibility with Gemini.
- Removed unnecessary console logs during translator initialization across multiple routes.
This commit is contained in:
decolua
2026-04-30 18:00:38 +07:00
parent b3fb20e237
commit 936d65ae1c
14 changed files with 265 additions and 48 deletions

View File

@@ -67,24 +67,17 @@ export class AntigravityExecutor extends BaseExecutor {
let tools = body.request?.tools;
if (tools && tools.length > 0) {
tools = tools
.map(group => {
if (!group.functionDeclarations) return group;
const cleanedDeclarations = group.functionDeclarations.map(fn => ({
...fn,
name: sanitizeFunctionName(fn.name),
parameters: fn.parameters
? cleanJSONSchemaForAntigravity(structuredClone(fn.parameters))
: { type: "object", properties: { reason: { type: "string", description: "Brief explanation" } }, required: ["reason"] }
}));
return {
...group,
functionDeclarations: cleanedDeclarations
};
})
.filter(group => group.functionDeclarations?.length > 0)
.slice(0, 1);
// Merge all groups into a single functionDeclarations group (Gemini expects 1 group)
const allDeclarations = tools.flatMap(group =>
(group.functionDeclarations || []).map(fn => ({
...fn,
name: sanitizeFunctionName(fn.name),
parameters: fn.parameters
? cleanJSONSchemaForAntigravity(structuredClone(fn.parameters))
: { type: "object", properties: { reason: { type: "string", description: "Brief explanation" } }, required: ["reason"] }
}))
);
tools = allDeclarations.length > 0 ? [{ functionDeclarations: allDeclarations }] : [];
}
const { tools: _originalTools, toolConfig: _originalToolConfig, ...requestWithoutTools } = body.request || {};

View File

@@ -16,6 +16,8 @@ import { handleForcedSSEToJson } from "./chatCore/sseToJsonHandler.js";
import { handleNonStreamingResponse } from "./chatCore/nonStreamingHandler.js";
import { handleStreamingResponse, buildOnStreamComplete } from "./chatCore/streamingHandler.js";
import { detectClientTool, isNativePassthrough } from "../utils/clientDetector.js";
import { injectCaveman } from "../rtk/caveman.js";
import { compressMessages, formatRtkLog } from "../rtk/index.js";
/**
* Core chat handler - shared between SSE and Worker
@@ -24,7 +26,7 @@ import { detectClientTool, isNativePassthrough } from "../utils/clientDetector.j
* @param {object} options.credentials - Provider credentials
* @param {string} options.sourceFormatOverride - Override detected source format (e.g. "openai-responses")
*/
export async function handleChatCore({ body, modelInfo, credentials, log, onCredentialsRefreshed, onRequestSuccess, onDisconnect, clientRawRequest, connectionId, userAgent, apiKey, ccFilterNaming, rtkEnabled, sourceFormatOverride, providerThinking }) {
export async function handleChatCore({ body, modelInfo, credentials, log, onCredentialsRefreshed, onRequestSuccess, onDisconnect, clientRawRequest, connectionId, userAgent, apiKey, ccFilterNaming, rtkEnabled, cavemanEnabled, cavemanLevel, sourceFormatOverride, providerThinking }) {
const { provider, model } = modelInfo;
const requestStartTime = Date.now();
@@ -82,7 +84,7 @@ export async function handleChatCore({ body, modelInfo, credentials, log, onCred
log?.debug?.("PASSTHROUGH", `${clientTool}${provider} | native lossless`);
translatedBody = { ...body, model };
} else {
translatedBody = translateRequest(sourceFormat, targetFormat, model, body, stream, credentials, provider, reqLogger, stripList, connectionId, rtkEnabled, clientTool);
translatedBody = translateRequest(sourceFormat, targetFormat, model, body, stream, credentials, provider, reqLogger, stripList, connectionId, clientTool);
if (!translatedBody) {
trackPendingRequest(model, provider, connectionId, false, true);
return createErrorResult(HTTP_STATUS.BAD_REQUEST, `Failed to translate request for ${sourceFormat}${targetFormat}`);
@@ -92,6 +94,21 @@ export async function handleChatCore({ body, modelInfo, credentials, log, onCred
translatedBody.model = model;
}
// Token savers: applied at the final body just before dispatch
// Covers both passthrough (source shape) and translated (target shape) flows
const finalFormat = passthrough ? sourceFormat : targetFormat;
// RTK: compress tool_result content
const rtkStats = compressMessages(translatedBody, rtkEnabled);
const rtkLine = formatRtkLog(rtkStats);
if (rtkLine) console.log(rtkLine);
// Caveman: inject terse-style system prompt
if (cavemanEnabled && cavemanLevel) {
injectCaveman(translatedBody, finalFormat, cavemanLevel);
log?.debug?.("CAVEMAN", `${cavemanLevel} | ${finalFormat}`);
}
const executor = getExecutor(provider);
trackPendingRequest(model, provider, connectionId, true);
appendRequestLog({ model, provider, connectionId, status: "PENDING" }).catch(() => {});

100
open-sse/rtk/caveman.js Normal file
View File

@@ -0,0 +1,100 @@
// Caveman injector: appends a caveman-style instruction into the system message
// of the final request body, just before it is dispatched to the provider executor.
// Dispatches by format so it works for both translated and native-passthrough flows.
import { FORMATS } from "../translator/formats.js";
import { CAVEMAN_PROMPTS } from "./cavemanPrompts.js";
const SEP = "\n\n";
// Entry point: pick the level's prompt text and route to the injector that
// matches the wire format of the final request body. No-op when the body is
// missing or the level has no configured prompt.
export function injectCaveman(body, format, level) {
  const prompt = CAVEMAN_PROMPTS[level];
  if (!body || !prompt) return;

  if (format === FORMATS.CLAUDE) {
    injectClaudeSystem(body, prompt);
    return;
  }

  const geminiShaped =
    format === FORMATS.GEMINI ||
    format === FORMATS.GEMINI_CLI ||
    format === FORMATS.VERTEX ||
    // Antigravity wraps the Gemini shape in body.request — injectGeminiSystem handles it.
    format === FORMATS.ANTIGRAVITY;
  if (geminiShaped) {
    injectGeminiSystem(body, prompt);
    return;
  }

  // Everything else is OpenAI-shaped (chat/responses/codex/cursor/kiro/ollama).
  injectMessagesSystem(body, prompt);
}
// OpenAI-shaped: messages[] (chat) or input[] (responses) or instructions (responses string)
// OpenAI-shaped bodies: messages[] (chat), input[] (responses), or a top-level
// instructions string (responses). Appends the prompt to the existing system
// text, or prepends a fresh system message when none exists.
function injectMessagesSystem(body, prompt) {
  // OpenAI Responses API may carry the system text as a plain string field.
  if (typeof body.instructions === "string") {
    body.instructions = body.instructions
      ? `${body.instructions}${SEP}${prompt}`
      : prompt;
    return;
  }

  let target = null;
  if (Array.isArray(body.messages)) {
    target = body.messages;
  } else if (Array.isArray(body.input)) {
    target = body.input;
  }
  if (!target) return;

  // Reuse the first system/developer message when present; otherwise insert one.
  const sysMsg = target.find(m => m && (m.role === "system" || m.role === "developer"));
  if (sysMsg) {
    appendToOpenAIMessage(sysMsg, prompt);
  } else {
    target.unshift({ role: "system", content: prompt });
  }
}
// Appends `prompt` to a single OpenAI-shaped message, handling the three
// content shapes: plain string, array of parts, or missing/null content.
function appendToOpenAIMessage(msg, prompt) {
  const SEP = "\n\n"; // paragraph separator, same value as the module-level SEP
  if (typeof msg.content === "string") {
    msg.content = `${msg.content}${SEP}${prompt}`;
    return;
  }
  if (Array.isArray(msg.content)) {
    // Mirror the part type already in use: chat-completions parts are
    // {type:"text"}, Responses parts are {type:"input_text"}. Previously we
    // always pushed "input_text", which is not a valid part type for
    // chat-completions message arrays.
    const existing = msg.content.find(p => p?.type === "text" || p?.type === "input_text");
    msg.content.push({ type: existing?.type ?? "input_text", text: prompt });
    return;
  }
  // No usable content — replace wholesale.
  msg.content = prompt;
}
// Claude shape: body.system as string | array of {type:"text", text}
// Insert before the last cache_control block to keep caveman inside the cached prefix.
// Claude shape: body.system is a string or an array of {type:"text", text}
// blocks. When the array carries cache_control markers, the new block is
// inserted in front of the last marked block so it stays inside the cached
// prefix; otherwise it is appended.
function injectClaudeSystem(body, prompt) {
  const { system } = body;

  if (typeof system === "string" && system.length > 0) {
    body.system = `${system}${SEP}${prompt}`;
    return;
  }

  if (!Array.isArray(system)) {
    // Missing or empty system: the prompt becomes the whole system string.
    body.system = prompt;
    return;
  }

  const promptBlock = { type: "text", text: prompt };
  let anchor = -1;
  for (let i = system.length - 1; i >= 0; i--) {
    if (system[i]?.cache_control) {
      anchor = i;
      break;
    }
  }
  if (anchor >= 0) {
    system.splice(anchor, 0, promptBlock);
  } else {
    system.push(promptBlock);
  }
}
// Gemini shape: body.system_instruction | body.systemInstruction | body.request.systemInstruction
// Each shape: { parts: [{ text }] }
// Gemini shape: the system field lives at body.system_instruction,
// body.systemInstruction, or body.request.systemInstruction, each of shape
// { parts: [{ text }] }. Appends a part when the field exists, else creates it.
function injectGeminiSystem(body, prompt) {
  // Antigravity nests the Gemini payload under body.request; plain Gemini is top-level.
  const payload = body.request && typeof body.request === "object" ? body.request : body;

  // Preserve whichever key casing the payload already uses.
  const field = Object.prototype.hasOwnProperty.call(payload, "system_instruction")
    ? "system_instruction"
    : "systemInstruction";

  const existing = payload[field];
  if (Array.isArray(existing?.parts)) {
    existing.parts.push({ text: prompt });
  } else {
    payload[field] = { parts: [{ text: prompt }] };
  }
}

View File

@@ -0,0 +1,35 @@
// Caveman intensity-level prompts injected into system message to reduce output tokens.
// Adapted from caveman skill (https://github.com/JuliusBrussee/caveman).
// Canonical identifiers for the three intensity settings. These string values
// are persisted in settings (cavemanLevel) and used as keys into CAVEMAN_PROMPTS.
export const CAVEMAN_LEVELS = {
  LITE: "lite",
  FULL: "full",
  ULTRA: "ultra",
};

// Guardrails shared by every level: technical artifacts stay verbatim and
// safety-critical instructions revert to normal prose.
const SHARED_BOUNDARIES = "Code blocks, file paths, commands, errors, URLs: keep exact. Security warnings, irreversible action confirmations, multi-step ordered sequences: write normal. Resume terse style after.";

// One ready-to-inject instruction string per level, joined from sentence
// fragments. Keyed by CAVEMAN_LEVELS values; unknown keys yield undefined,
// which injectCaveman treats as a no-op.
export const CAVEMAN_PROMPTS = {
  // Lite: full grammar retained, only filler removed.
  [CAVEMAN_LEVELS.LITE]: [
    "Respond tersely. Keep grammar and full sentences but drop filler, hedging and pleasantries (just/really/basically/sure/of course/I'd be happy to).",
    "Pattern: state the thing, the action, the reason. Then next step.",
    SHARED_BOUNDARIES,
    "Active every response until user asks for normal mode.",
  ].join(" "),
  // Full: fragments allowed, articles dropped.
  [CAVEMAN_LEVELS.FULL]: [
    "Respond like terse caveman. All technical substance stay exact, only fluff die.",
    "Drop: articles (a/an/the), filler (just/really/basically/actually/simply), pleasantries, hedging. Fragments OK. Short synonyms (big not extensive, fix not implement a solution for).",
    "Pattern: [thing] [action] [reason]. [next step].",
    SHARED_BOUNDARIES,
    "Active every response until user asks for normal mode.",
  ].join(" "),
  // Ultra: telegraphic style, maximum compression.
  [CAVEMAN_LEVELS.ULTRA]: [
    "Respond ultra-terse. Maximum compression. Telegraphic.",
    "Abbreviate (DB/auth/config/req/res/fn/impl), strip conjunctions, use arrows for causality (X → Y). One word when one word enough.",
    "Pattern: [thing] → [result]. [fix].",
    SHARED_BOUNDARIES,
    "Active every response until user asks for normal mode.",
  ].join(" "),
};

View File

@@ -5,7 +5,6 @@ import { cloakClaudeTools } from "../utils/claudeCloaking.js";
import { filterToOpenAIFormat } from "./helpers/openaiHelper.js";
import { normalizeThinkingConfig } from "../services/provider.js";
import { AntigravityExecutor } from "../executors/antigravity.js";
import { compressMessages, formatRtkLog } from "../rtk/index.js";
// Registry for translators
const requestRegistry = new Map();
@@ -71,17 +70,10 @@ function stripContentTypes(body, stripList = []) {
}
// Translate request: source -> openai -> target
export function translateRequest(sourceFormat, targetFormat, model, body, stream = true, credentials = null, provider = null, reqLogger = null, stripList = [], connectionId = null, rtkEnabled = false, clientTool = null) {
export function translateRequest(sourceFormat, targetFormat, model, body, stream = true, credentials = null, provider = null, reqLogger = null, stripList = [], connectionId = null, clientTool = null) {
ensureInitialized();
let result = body;
// RTK: compress tool_result content before any translation (shape-agnostic)
const rtkStats = compressMessages(result, rtkEnabled);
if (rtkStats) {
const line = formatRtkLog(rtkStats);
if (line) console.log(line);
}
// Strip explicit content types (opt-in via strip[] in PROVIDER_MODELS entry)
stripContentTypes(result, stripList);

View File

@@ -14,6 +14,12 @@ const TUNNEL_BENEFITS = [
const TUNNEL_PING_INTERVAL_MS = 2000;
const TUNNEL_PING_MAX_MS = 300000;
const CAVEMAN_LEVELS = [
{ id: "lite", label: "Lite", desc: "Drop filler, keep grammar" },
{ id: "full", label: "Full", desc: "Drop articles, fragments OK" },
{ id: "ultra", label: "Ultra", desc: "Telegraphic, max compression" },
];
export default function APIPageClient({ machineId }) {
const [keys, setKeys] = useState([]);
const [loading, setLoading] = useState(true);
@@ -26,6 +32,8 @@ export default function APIPageClient({ machineId }) {
const [hasPassword, setHasPassword] = useState(true);
const [tunnelDashboardAccess, setTunnelDashboardAccess] = useState(false);
const [rtkEnabled, setRtkEnabledState] = useState(true);
const [cavemanEnabled, setCavemanEnabled] = useState(false);
const [cavemanLevel, setCavemanLevel] = useState("full");
// Cloudflare Tunnel state
const [tunnelChecking, setTunnelChecking] = useState(true);
@@ -82,6 +90,8 @@ export default function APIPageClient({ machineId }) {
setHasPassword(data.hasPassword || false);
setTunnelDashboardAccess(data.tunnelDashboardAccess || false);
setRtkEnabledState(data.rtkEnabled !== false);
setCavemanEnabled(!!data.cavemanEnabled);
setCavemanLevel(data.cavemanLevel || "full");
}
if (statusRes.ok) {
const data = await statusRes.json();
@@ -182,6 +192,28 @@ export default function APIPageClient({ machineId }) {
}
};
// Best-effort persistence of a partial settings object. Callers update UI
// state optimistically first, so failures here are logged and otherwise
// ignored rather than surfaced to the user.
const patchSetting = async (patch) => {
  try {
    const res = await fetch("/api/settings", {
      method: "PATCH",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify(patch),
    });
    // Previously non-2xx responses were silently ignored; log them so a
    // failed save is at least visible in the console.
    if (!res.ok) {
      console.error("Error updating setting: HTTP", res.status);
    }
  } catch (error) {
    // console.error (not console.log) so the failure shows at error level.
    console.error("Error updating setting:", error);
  }
};
// Toggle Caveman on/off: update local state immediately, then persist in the
// background (fire-and-forget; patchSetting logs and swallows failures).
const handleCavemanEnabled = (value) => {
setCavemanEnabled(value);
patchSetting({ cavemanEnabled: value });
};
// Select the Caveman intensity level ("lite" | "full" | "ultra"); persisted
// the same fire-and-forget way.
const handleCavemanLevel = (level) => {
setCavemanLevel(level);
patchSetting({ cavemanLevel: level });
};
const fetchData = async () => {
try {
const keysRes = await fetch("/api/keys");
@@ -813,16 +845,26 @@ export default function APIPageClient({ machineId }) {
)}
</Card>
{/* Token Saver (RTK) */}
{/* Token Saver (RTK + Caveman) */}
<Card id="rtk">
<div className="flex items-center justify-between mb-2">
<h2 className="text-lg font-semibold">Token Saver</h2>
</div>
<div className="flex items-center justify-between pt-2">
<div className="flex items-center justify-between pt-2 pb-4 border-b border-border">
<div className="pr-4">
<p className="font-medium">Compress tool output</p>
<p className="font-medium">
Compress tool output{" "}
<a
href="https://github.com/rtk-ai/rtk"
target="_blank"
rel="noreferrer"
className="text-xs font-normal text-primary underline hover:opacity-80"
>
(RTK)
</a>
</p>
<p className="text-sm text-text-muted">
Auto-compress tool output (git diff/grep/ls/tree/logs) before sending to LLM to save tokens. Disable if you see issues.
Auto-compress tool output (git diff/grep/ls/tree/logs) before sending to LLM (60-90% fewer input tokens on common dev commands). Disable if you see issues.
</p>
</div>
<Toggle
@@ -830,6 +872,46 @@ export default function APIPageClient({ machineId }) {
onChange={() => handleRtkEnabled(!rtkEnabled)}
/>
</div>
<div className="flex items-center justify-between pt-4">
<div className="pr-4">
<p className="font-medium">
Compress LLM output{" "}
<a
href="https://github.com/JuliusBrussee/caveman"
target="_blank"
rel="noreferrer"
className="text-xs font-normal text-primary underline hover:opacity-80"
>
(Caveman)
</a>
</p>
<p className="text-sm text-text-muted">
Inject a terse-style instruction into the system prompt so the LLM replies shorter (~65% fewer output tokens on average, up to 87%). Code, errors and warnings stay exact.
</p>
</div>
<Toggle
checked={cavemanEnabled}
onChange={() => handleCavemanEnabled(!cavemanEnabled)}
/>
</div>
{cavemanEnabled && (
<div className="mt-3 flex items-center gap-2">
{CAVEMAN_LEVELS.map((lvl) => (
<button
key={lvl.id}
onClick={() => handleCavemanLevel(lvl.id)}
className={`px-3 py-1.5 rounded text-xs font-medium border transition-colors ${
cavemanLevel === lvl.id
? "bg-primary text-white border-primary"
: "bg-transparent border-border text-text-muted hover:bg-black/5 dark:hover:bg-white/5"
}`}
title={lvl.desc}
>
{lvl.label}
</button>
))}
</div>
)}
</Card>
{/* API Keys */}

View File

@@ -8,7 +8,6 @@ async function ensureInitialized() {
if (!initialized) {
await initTranslators();
initialized = true;
console.log("[SSE] Translators initialized");
}
}

View File

@@ -11,7 +11,6 @@ async function ensureInitialized() {
if (!initialized) {
await initTranslators();
initialized = true;
console.log("[SSE] Translators initialized");
}
}

View File

@@ -10,7 +10,6 @@ async function ensureInitialized() {
if (!initialized) {
await initTranslators();
initialized = true;
console.log("[SSE] Translators initialized for /v1/messages");
}
}

View File

@@ -7,7 +7,6 @@ async function ensureInitialized() {
if (!initialized) {
await initTranslators();
initialized = true;
console.log("[SSE] Translators initialized for /v1/responses");
}
}

View File

@@ -10,7 +10,6 @@ async function ensureInitialized() {
if (!initialized) {
await initTranslators();
initialized = true;
console.log("[SSE] Translators initialized for /v1beta/models");
}
}

View File

@@ -37,6 +37,8 @@ const DEFAULT_SETTINGS = {
outboundNoProxy: "",
mitmRouterBaseUrl: DEFAULT_MITM_ROUTER_BASE,
rtkEnabled: true,
cavemanEnabled: false,
cavemanLevel: "full",
};
function cloneDefaultData() {

View File

@@ -12,9 +12,15 @@ const { getCertForDomain } = require("./cert/generate");
const DB_FILE = path.join(DATA_DIR, "db.json");
const LOCAL_PORT = 443;
const IS_WIN = process.platform === "win32";
const ENABLE_FILE_LOG = false;
const ENABLE_FILE_LOG = true;
const INTERNAL_REQUEST_HEADER = { name: "x-request-source", value: "local" };
// Host rewrite for upstream forward: PROD cloudcode-pa is rate-limited (429),
// daily-cloudcode-pa (dev endpoint) accepts same body+token. Same trick as open-sse.
const HOST_REWRITE = {
"cloudcode-pa.googleapis.com": "daily-cloudcode-pa.googleapis.com",
};
// Load handlers — dev/ overrides handlers/ for private implementations
function loadHandler(name) {
try { return require(`./dev/${name}`); } catch {}
@@ -43,7 +49,6 @@ function sniCallback(servername, cb) {
cert: `${certData.cert}\n${rootCAPem}`
});
certCache.set(servername, ctx);
log(`🔐 Cert generated: ${servername}`);
cb(null, ctx);
} catch (e) {
err(`SNI error for ${servername}: ${e.message}`);
@@ -123,7 +128,8 @@ function getMappedModel(tool, model) {
* Also tees full stream into a dump file when ENABLE_FILE_LOG is on.
*/
async function passthrough(req, res, bodyBuffer, onResponse) {
const targetHost = (req.headers.host || TARGET_HOSTS[0]).split(":")[0];
const originalHost = (req.headers.host || TARGET_HOSTS[0]).split(":")[0];
const targetHost = HOST_REWRITE[originalHost] || originalHost;
const targetIP = await resolveTargetIP(targetHost);
const dumper = ENABLE_FILE_LOG ? createResponseDumper(req, "passthrough") : null;
@@ -194,25 +200,18 @@ const server = https.createServer(sslOptions, async (req, res) => {
const isChat = patterns.some(p => req.url.includes(p));
if (!isChat) return passthrough(req, res, bodyBuffer);
log(`🔍 [${tool}] url=${req.url} | bodyLen=${bodyBuffer.length}`);
// Cursor uses binary proto — model extraction not possible at this layer.
// Delegate directly to handler which decodes proto internally.
if (tool === "cursor") {
log(`⚡ intercept | cursor | proto`);
return handlers[tool].intercept(req, res, bodyBuffer, null, passthrough);
}
const model = extractModel(req.url, bodyBuffer);
log(`🔍 [${tool}] model="${model}"`);
const mappedModel = getMappedModel(tool, model);
if (!mappedModel) {
log(`⏩ passthrough | no mapping | ${tool} | ${model || "unknown"}`);
return passthrough(req, res, bodyBuffer);
}
log(`⚡ intercept | ${tool} | ${model}${mappedModel}`);
return handlers[tool].intercept(req, res, bodyBuffer, mappedModel, passthrough);
} catch (e) {
err(`Unhandled error: ${e.message}`);

View File

@@ -207,6 +207,8 @@ async function handleSingleModelChat(body, modelStr, clientRawRequest = null, re
apiKey,
ccFilterNaming: !!chatSettings.ccFilterNaming,
rtkEnabled: !!chatSettings.rtkEnabled,
cavemanEnabled: !!chatSettings.cavemanEnabled,
cavemanLevel: chatSettings.cavemanLevel || "full",
providerThinking,
// Detect source format by endpoint + body
sourceFormatOverride: request?.url ? detectFormatByEndpoint(new URL(request.url).pathname, body) : null,