Mirror of https://github.com/decolua/9router.git (synced 2026-05-08 12:01:28 +00:00)
feat: implement batch processing for README translation
- Introduce BATCH_SIZE configuration for parallel language translation
- Update translation logic to process languages in batches, with a delay between batches to avoid rate limits
- Enhance logging to display the current batch being processed

Also, add a new TimeAgo component for auto-updating time display in UsageStats.
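For orientation before the diff, here is a minimal standalone sketch of the batching pattern this commit introduces. The translateToLanguage stub and the language list below are placeholders for illustration; the real script streams translations from the GLM API.

// Sketch only: batch-parallel translation with a pause between batches.
// translateToLanguage is stubbed here; the real script calls the GLM API.
const BATCH_SIZE = parseInt(process.env.TRANSLATE_BATCH_SIZE || '2');

async function translateToLanguage(content, lang) {
  await new Promise(resolve => setTimeout(resolve, 100)); // stand-in for the API call
  return `${lang}: ${content.length} chars translated`;
}

async function translateInBatches(content, targetLangs) {
  const results = [];
  for (let i = 0; i < targetLangs.length; i += BATCH_SIZE) {
    const batch = targetLangs.slice(i, i + BATCH_SIZE);
    // Run one batch in parallel; allSettled records failures without aborting the run.
    const batchResults = await Promise.allSettled(
      batch.map(lang => translateToLanguage(content, lang))
    );
    results.push(...batchResults);
    // Pause between batches so the API rate limit is not hit.
    if (i + BATCH_SIZE < targetLangs.length) {
      await new Promise(resolve => setTimeout(resolve, 3000));
    }
  }
  return results;
}

translateInBatches('# README', ['vi', 'ja', 'zh', 'ko']).then(console.log);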
.github/scripts/translate-readme.js (vendored, 24 changed lines)
@@ -9,6 +9,7 @@ const API_MODEL = process.env.GLM_API_MODEL || 'glm-5';
 const API_KEY = process.env.GLM_API_KEY;
 const MAX_TOKENS = parseInt(process.env.GLM_MAX_TOKENS || '32000');
 const TEMPERATURE = parseFloat(process.env.GLM_TEMPERATURE || '0.3');
+const BATCH_SIZE = parseInt(process.env.TRANSLATE_BATCH_SIZE || '2'); // Number of languages to translate in parallel
 
 const SUPPORTED_LANGUAGES = {
   vi: 'Vietnamese',
@@ -144,20 +145,29 @@ async function main() {
   console.log(`API Endpoint: ${API_ENDPOINT}`);
   console.log(`Model: ${API_MODEL}`);
   console.log(`Max Tokens: ${MAX_TOKENS}`);
+  console.log(`Batch Size: ${BATCH_SIZE}`);
   console.log(`Languages: ${targetLangs.join(', ')}`);
   console.log('='.repeat(60));
 
   const readmePath = path.join(__dirname, '../../README.md');
   const readmeContent = fs.readFileSync(readmePath, 'utf8');
 
-  // Translate languages sequentially (streaming doesn't work well in parallel)
+  // Translate languages in batches
   const results = [];
-  for (const lang of targetLangs) {
-    try {
-      const result = await translateToLanguage(readmeContent, lang);
-      results.push({ status: 'fulfilled', value: result });
-    } catch (error) {
-      results.push({ status: 'rejected', reason: error, lang });
+  for (let i = 0; i < targetLangs.length; i += BATCH_SIZE) {
+    const batch = targetLangs.slice(i, i + BATCH_SIZE);
+    console.log(`\nBatch ${Math.floor(i / BATCH_SIZE) + 1}/${Math.ceil(targetLangs.length / BATCH_SIZE)}: ${batch.join(', ')}`);
+
+    const batchResults = await Promise.allSettled(
+      batch.map(lang => translateToLanguage(readmeContent, lang))
+    );
+
+    results.push(...batchResults);
+
+    // Wait between batches to avoid rate limit
+    if (i + BATCH_SIZE < targetLangs.length) {
+      console.log('\nWaiting 3s before next batch...');
+      await new Promise(resolve => setTimeout(resolve, 3000));
+    }
     }
   }
 
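After this loop, results holds one Promise.allSettled outcome per language, in the same order as targetLangs (batches are processed and appended in order). How main() actually reports those outcomes is outside this hunk, so the tally below is only an assumed sketch, not the script's code.

// Assumed sketch: summarizing the allSettled outcomes gathered above.
// The real reporting logic in main() is not part of this diff.
function summarizeResults(results, targetLangs) {
  results.forEach((r, idx) => {
    if (r.status === 'fulfilled') {
      console.log(`OK   ${targetLangs[idx]}`);
    } else {
      console.error(`FAIL ${targetLangs[idx]}: ${r.reason && r.reason.message ? r.reason.message : r.reason}`);
    }
  });
  // A non-zero exit code lets the GitHub Actions job surface partial failures.
  if (results.some(r => r.status === 'rejected')) process.exitCode = 1;
}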
The second changed file is the UsageStats component referenced in the commit message (its exact path is not shown in this excerpt). It gains the new TimeAgo component:

@@ -17,6 +17,18 @@ function timeAgo(timestamp) {
   return `${Math.floor(diff / 86400)}d ago`;
 }
 
+// Auto-update time display every second without re-rendering parent
+function TimeAgo({ timestamp }) {
+  const [, setTick] = useState(0);
+
+  useEffect(() => {
+    const timer = setInterval(() => setTick(t => t + 1), 1000);
+    return () => clearInterval(timer);
+  }, []);
+
+  return <>{timeAgo(timestamp)}</>;
+}
+
 function RecentRequests({ requests = [] }) {
   return (
     <Card className="flex flex-col overflow-hidden" padding="sm" style={{ height: 480 }}>
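The hole in the array destructure (const [, setTick] = useState(0)) is deliberate: the tick value is never read, it is only bumped so that this one leaf component re-renders every second and timeAgo(timestamp) is recomputed, while the parent table stays untouched. Below is a sketch of the same idea pulled out as a reusable hook; useForceTick and LiveClock are hypothetical names for illustration, not part of the commit.

// Sketch only: the "tick" trick from TimeAgo, extracted as a hypothetical hook.
import { useEffect, useState } from 'react';

function useForceTick(intervalMs = 1000) {
  const [, setTick] = useState(0);

  useEffect(() => {
    // Bumping state on an interval re-renders only the component using the hook,
    // so time-derived values are recomputed on schedule.
    const timer = setInterval(() => setTick(t => t + 1), intervalMs);
    return () => clearInterval(timer);
  }, [intervalMs]);
}

// Hypothetical usage: a self-contained live clock built on the hook.
function LiveClock() {
  useForceTick(1000);
  return <>{new Date().toLocaleTimeString()}</>;
}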
@@ -52,7 +64,7 @@ function RecentRequests({ requests = [] }) {
                   {" "}
                   <span className="text-success">{fmt(r.completionTokens)}↓</span>
                 </td>
-                <td className="py-1.5 text-right text-text-muted whitespace-nowrap">{timeAgo(r.timestamp)}</td>
+                <td className="py-1.5 text-right text-text-muted whitespace-nowrap"><TimeAgo timestamp={r.timestamp} /></td>
               </tr>
             );
           })}