// llmSSE.js - SSE/streaming support for live AI output
const express = require('express');
const router = express.Router();
const { completionStream, callLlama } = require('./llamaCppService');

/**
 * POST /ai/stream
 * Streams an LLM completion to the client as Server-Sent Events.
 *
 * Request body: { text: string, type: string }
 *   - `type` selects the prompt template; `text` is interpolated into it.
 * Response: text/event-stream with `data:` chunks, then an `end` event
 *   carrying `[DONE]`. On failure an `error` event is emitted instead.
 *
 * NOTE(review): the original source was truncated — the prompt-selection
 * branching and the try/catch wrapper were reconstructed. The `type`
 * values ('summarize', 'expand', default=rewrite) are inferred from the
 * three prompt templates; confirm against the client code.
 */
router.post('/ai/stream', async (req, res) => {
  // Standard SSE response headers; flushed immediately so the client can
  // start consuming events before the model finishes.
  res.writeHead(200, {
    'Content-Type': 'text/event-stream',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
  });

  const { text, type } = req.body;

  // Use real streaming from llama.cpp
  let prompt;
  switch (type) {
    case 'summarize':
      prompt = `Summarize the following text concisely:\n\n${text}`;
      break;
    case 'expand':
      prompt = `Expand the following bullet points into full paragraphs:\n\n${text}`;
      break;
    default:
      prompt = `Rewrite the following in a clear, professional tone. Preserve technical meaning:\n\n${text}`;
  }

  try {
    // completionStream invokes the callback once per generated chunk;
    // each chunk becomes one SSE `data:` frame.
    await completionStream(prompt, (chunk) => {
      res.write(`data: ${chunk}\n\n`);
    });
    res.write('event: end\ndata: [DONE]\n\n');
  } catch (error) {
    console.error('[SSE] Streaming error:', error.message);
    // Flatten to a single line: a newline inside an SSE data field would
    // break event framing on the client.
    const message = String(error.message).replace(/\n/g, ' ');
    res.write(`event: error\ndata: ${message}\n\n`);
  } finally {
    // Always close the stream, whether the completion succeeded or failed.
    res.end();
  }
});

module.exports = router;