EverydayTech Platform - Developer Reference
Complete Source Code Documentation - All Applications
Loading...
Searching...
No Matches
ai.js
Go to the documentation of this file.
/**
 * @file ai.js
 * @module AIRoutes
 * @description AI/LLM-powered text processing routes. Provides text rewriting/rewording capabilities using llama.cpp HTTP API with job queuing, rate limiting, and SSE streaming support. Used for AI-assisted ticket descriptions, professional email composition, and content enhancement.
 * @see {@link ../services/llmQueue} for LLM job queue management
 * @see {@link ../services/llmSSE} for Server-Sent Events streaming
 * @see {@link ../middleware/llmCacheRateLimit} for rate limiting and caching
 * @apiDefine AIGroup AI
 * @apiGroup AI
 * @apiHeader {string} [Authorization] Bearer token (optional for some endpoints).
 * @apiError (Error 400) BadRequest No text provided.
 * @apiError (Error 429) TooManyRequests Rate limit exceeded.
 * @apiError (Error 500) ServerError LLM processing failed.
 */
15
16const express = require('express');
17const router = express.Router();
18const { llmQueue } = require('../services/llmQueue');
19const llmSSE = require('../services/llmSSE');
20
21// Use llama.cpp HTTP API for rewording
22const fetch = require('node-fetch');
23const llmCacheRateLimit = require('../middleware/llmCacheRateLimit');
24
/**
 * @api {post} /ai/reword Reword/rewrite text using AI
 * @apiName RewordText
 * @apiGroup AI
 * @apiDescription Process text through LLM (llama.cpp) to rewrite, improve, or adapt content based on optional prompt. Jobs are queued and rate-limited per tenant. Result is cached for identical requests. Used for professional email composition, ticket description enhancement, and content improvement.
 * @apiParam {string} text Original text to reword.
 * @apiParam {string} [prompt] Optional rewriting instruction (e.g., "make it more professional", "simplify for non-technical audience").
 * @apiParam {string} [tenantId] Tenant ID for rate limiting and caching.
 * @apiSuccess {string} reworded AI-rewritten text.
 * @apiError (Error 400) BadRequest No text provided.
 * @apiError (Error 429) TooManyRequests Rate limit exceeded (10 requests per minute per tenant).
 * @apiError (Error 500) ServerError LLM processing failed.
 * @apiExample {curl} Example usage:
 *   curl -X POST -d '{"text":"server not work","prompt":"make it professional"}' https://api.example.com/ai/reword
 * @apiExample {json} Response example:
 *   {
 *     "reworded": "The server is currently experiencing connectivity issues and requires immediate attention."
 *   }
 */
router.post('/reword', llmCacheRateLimit, async (req, res) => {
  const { text, prompt, tenantId } = req.body;
  console.log('[AI REWORD] Incoming request:', { text, prompt, tenantId });

  // Guard clause: documented 400 BadRequest when no text is supplied.
  if (!text) {
    console.warn('[AI REWORD] No text provided');
    return res.status(400).json({ error: 'No text provided' });
  }

  try {
    // Enqueue the LLM job and block this request until the worker finishes it.
    // NOTE(review): with BullMQ, waitUntilFinished normally takes a QueueEvents
    // instance — presumably llmQueue wraps that; verify against the service.
    const job = await llmQueue.add('reword', { text, prompt, tenantId });
    const result = await job.waitUntilFinished();
    return res.json({ reworded: result.result });
  } catch (err) {
    // Express 4 does not catch rejections from async handlers: without this
    // try/catch a queue/LLM failure became an unhandled rejection and the
    // HTTP request hung forever. Return the documented 500 instead.
    console.error('[AI REWORD] LLM processing failed:', err);
    return res.status(500).json({ error: 'LLM processing failed' });
  }
});
56
// Mount the Server-Sent Events sub-router/middleware after the JSON routes —
// presumably exposes streaming endpoints for LLM job progress/output; verify
// the exact paths in ../services/llmSSE.
router.use(llmSSE);

// Export the assembled router for mounting by the application (e.g. under /ai).
module.exports = router;