EverydayTech Platform - Developer Reference
Complete Source Code Documentation - All Applications
Loading...
Searching...
No Matches
aiWorker.js
Go to the documentation of this file.
// AI Worker: Ticket, Alert, Email, Remediation, Contract/Policy
// Now uses llama.cpp HTTP API instead of Ollama spawn
const Redis = require("ioredis");
const { callLlama } = require("./services/llamaCppService");

const redisConfig = require('./config/redis');

// Two dedicated connections: a Redis client that has entered subscriber
// mode cannot issue regular commands such as PUBLISH, so results are
// published over a separate client.
const pub = new Redis(redisConfig);
const sub = new Redis(redisConfig);

// Every automation channel this worker listens on.
const SUBSCRIBED_CHANNELS = [
  "tickets:new",
  "alerts:new",
  "emails:inbound",
  "remediation:new",
  "contracts:policy",
];
sub.subscribe(...SUBSCRIBED_CHANNELS);
12
// Routes each subscribed channel to the payload type passed to callLlama
// and the Redis key prefix used to publish the AI analysis result.
const CHANNEL_ROUTES = Object.freeze({
  "tickets:new":      { type: "ticket",      keyPrefix: "tickets:ai:analysis:" },
  "alerts:new":       { type: "alert",       keyPrefix: "alerts:ai:analysis:" },
  "emails:inbound":   { type: "email",       keyPrefix: "emails:ai:analysis:" },
  "remediation:new":  { type: "remediation", keyPrefix: "remediation:ai:analysis:" },
  "contracts:policy": { type: "contract",    keyPrefix: "contracts:ai:analysis:" },
});

// Handle one inbound pub/sub message: parse it, run the LLM over its
// `text` field, and publish the result (or an error payload) to the
// channel-specific analysis key.
sub.on("message", async (channel, message) => {
  // Payloads come from external publishers; guard the parse so a single
  // malformed message cannot raise an unhandled promise rejection and
  // take the worker down.
  let data;
  try {
    data = JSON.parse(message);
  } catch (parseError) {
    console.error(`[AI Worker] Invalid JSON on ${channel}:`, parseError.message);
    return;
  }

  const route = CHANNEL_ROUTES[channel];
  if (!route) return; // not a channel this worker handles

  // Without an id the output key would end in "undefined" and nothing
  // downstream could correlate the result — drop the message instead.
  if (data?.id === undefined || data.id === null) {
    console.error(`[AI Worker] Message on ${channel} has no id; skipping.`);
    return;
  }

  const outputKey = `${route.keyPrefix}${data.id}`;

  try {
    console.log(`[AI Worker] Processing ${channel} for ID: ${data.id}`);
    const output = await callLlama(data.text, route.type);
    await pub.publish(outputKey, JSON.stringify({ output }));
    console.log(`[AI Worker] Published result to ${outputKey}`);
  } catch (error) {
    console.error(`[AI Worker] Error processing ${channel}:`, error.message);
    try {
      await pub.publish(outputKey, JSON.stringify({
        error: error.message,
        output: "AI processing failed. Please try again later."
      }));
    } catch (publishError) {
      // If Redis itself is unavailable the error publish fails too; log
      // rather than surfacing a second unhandled rejection.
      console.error(`[AI Worker] Failed to publish error to ${outputKey}:`, publishError.message);
    }
  }
});
56
// Startup banner: log the effective connection targets so a
// misconfigured environment is visible immediately in the worker logs.
const redisHost = process.env.REDIS_HOST || 'localhost';
const redisPort = process.env.REDIS_PORT || 6379;
const llamaEndpoint = process.env.LLAMA_CPP_ENDPOINT || 'http://localhost:8080';

console.log(`[AI Worker] Connected to Redis: ${redisHost}:${redisPort}`);
console.log(`[AI Worker] Using llama.cpp endpoint: ${llamaEndpoint}`);
console.log("AI Worker running: listening to Redis channels for automation.");