EverydayTech Platform - Developer Reference
Complete Source Code Documentation - All Applications
Loading...
Searching...
No Matches
llmSSE.js
Go to the documentation of this file.
1// llmSSE.js - SSE/streaming support for live AI output
2const express = require('express');
3const router = express.Router();
4const { completionStream, callLlama } = require('./llamaCppService');
5
// POST /ai/stream — stream LLM output to the client as Server-Sent Events.
//
// Body: { text: string, type?: 'summary' | 'expand' }  (any other `type`
// falls through to the default "professional rewrite" prompt).
// Emits `data:` frames per token chunk, then `event: end` / `[DONE]`,
// or `event: error` with the failure message.
router.post('/ai/stream', async (req, res) => {
  res.set({
    'Content-Type': 'text/event-stream',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
  });
  // Flush headers now so the client opens the event stream before the
  // first token arrives, rather than buffering until the first write.
  res.flushHeaders();

  const { text, type } = req.body ?? {};

  // Validate up front: without this, a missing body would stream a prompt
  // containing the literal string "undefined" to the model.
  if (typeof text !== 'string' || text.trim() === '') {
    res.write('event: error\ndata: Missing "text" in request body\n\n');
    res.end();
    return;
  }

  // Stop writing once the client disconnects mid-stream; the response
  // socket is gone and further writes are wasted work.
  let clientGone = false;
  req.on('close', () => {
    clientGone = true;
  });

  try {
    let prompt;
    switch (type) {
      case 'summary':
        prompt = `Summarize the following text concisely:\n\n${text}`;
        break;
      case 'expand':
        prompt = `Expand the following bullet points into full paragraphs:\n\n${text}`;
        break;
      default:
        prompt = `Rewrite the following in a clear, professional tone. Preserve technical meaning:\n\n${text}`;
    }

    await completionStream(prompt, (chunk) => {
      if (clientGone) return;
      // SSE frames are newline-delimited: a raw "\n" inside `chunk` would
      // terminate the frame early and corrupt the stream. Emit one `data:`
      // line per payload line, per the SSE event-stream format.
      const frame = String(chunk)
        .split('\n')
        .map((line) => `data: ${line}`)
        .join('\n');
      res.write(`${frame}\n\n`);
    });

    if (!clientGone) {
      res.write('event: end\ndata: [DONE]\n\n');
      res.end();
    }
  } catch (error) {
    console.error('[SSE] Streaming error:', error.message);
    if (!clientGone) {
      // Flatten newlines for the same SSE-framing reason as above.
      const msg = String(error.message).split('\n').join(' ');
      res.write(`event: error\ndata: ${msg}\n\n`);
      res.end();
    }
  }
});
40
41module.exports = router;