// AI Worker: Ticket, Alert, Email, Remediation, Contract/Policy
// Now uses the llama.cpp HTTP API instead of an Ollama spawn
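// Flow: upstream services publish JSON payloads on the channels subscribed below; for each
// message this worker calls the model and publishes the result to "<family>:ai:analysis:<id>".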
const Redis = require("ioredis");
const { callLlama } = require("./services/llamaCppService");

const redisConfig = require('./config/redis');
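// Two connections: once a Redis connection enters subscriber mode it can no longer issue
// regular commands, so a separate connection is kept for publishing results.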
const sub = new Redis(redisConfig);
const pub = new Redis(redisConfig);
// Subscribe to all relevant channels
sub.subscribe("tickets:new", "alerts:new", "emails:inbound", "remediation:new", "contracts:policy");
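
// Each message is expected to be a JSON payload carrying at least { id, text }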
sub.on("message", async (channel, message) => {
  const data = JSON.parse(message);
  let type, outputKey;

  // Map each channel to an analysis type and the channel the result is published on.
  // The type string tells callLlama which kind of analysis to run.
  switch (channel) {
    case "tickets:new":
      type = "ticket";
      outputKey = `tickets:ai:analysis:${data.id}`;
      break;
    case "alerts:new":
      type = "alert";
      outputKey = `alerts:ai:analysis:${data.id}`;
      break;
    case "emails:inbound":
      type = "email";
      outputKey = `emails:ai:analysis:${data.id}`;
      break;
    case "remediation:new":
      type = "remediation";
      outputKey = `remediation:ai:analysis:${data.id}`;
      break;
    case "contracts:policy":
      type = "contract";
      outputKey = `contracts:ai:analysis:${data.id}`;
      break;
    default:
      return; // Ignore channels this worker does not handle
  }

  try {
    console.log(`[AI Worker] Processing ${channel} for ID: ${data.id}`);
    const output = await callLlama(data.text, type);
    await pub.publish(outputKey, JSON.stringify({ output }));
    console.log(`[AI Worker] Published result to ${outputKey}`);
  } catch (error) {
    console.error(`[AI Worker] Error processing ${channel}:`, error.message);
    // Publish a fallback payload so consumers are not left waiting on a silent failure
    await pub.publish(outputKey, JSON.stringify({
      error: true,
      output: "AI processing failed. Please try again later."
    }));
  }
});

console.log(`[AI Worker] Connected to Redis: ${process.env.REDIS_HOST || 'localhost'}:${process.env.REDIS_PORT || 6379}`);
console.log(`[AI Worker] Using llama.cpp endpoint: ${process.env.LLAMA_CPP_ENDPOINT || 'http://localhost:8080'}`);
console.log("AI Worker running: listening to Redis channels for automation.");