From 14ca874435f856d32da775fdb385c5a463450370 Mon Sep 17 00:00:00 2001 From: Iyansr97 Date: Sat, 8 Mar 2025 19:56:12 +0100 Subject: [PATCH 1/3] LLM-service is now available and running correctly. --- llmservice/llm-service.js | 66 +++++++++++++++++++-------------------- 1 file changed, 32 insertions(+), 34 deletions(-) diff --git a/llmservice/llm-service.js b/llmservice/llm-service.js index da0ff837..f7f7fa77 100644 --- a/llmservice/llm-service.js +++ b/llmservice/llm-service.js @@ -3,29 +3,22 @@ const express = require('express'); const app = express(); const port = 8003; +let moderation = "You are a helpful assistant."; -// Middleware to parse JSON in request body -app.use(express.json()); +app.use(express.json()); // Middleware para parsear JSON -// Define configurations for different LLM APIs const llmConfigs = { - gemini: { - url: (apiKey) => `https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=${apiKey}`, - transformRequest: (question) => ({ - contents: [{ parts: [{ text: question }] }] - }), - transformResponse: (response) => response.data.candidates[0]?.content?.parts[0]?.text - }, empathy: { - url: () => 'https://empathyai.staging.empathy.co/v1/chat/completions', - transformRequest: (question) => ({ + url: () => 'https://ai-challenge.empathy.ai/v1/chat/completions', + transformRequest: (question, moderation) => ({ model: "qwen/Qwen2.5-Coder-7B-Instruct", + stream: false, // No soporta stream=true con axios directamente messages: [ - { role: "system", content: "You are a helpful assistant." 
}, + { role: "system", content: moderation }, { role: "user", content: question } ] }), - transformResponse: (response) => response.data.choices[0]?.message?.content, + transformResponse: (response) => response.data.choices?.[0]?.message?.content || "No response", headers: (apiKey) => ({ Authorization: `Bearer ${apiKey}`, 'Content-Type': 'application/json' @@ -33,7 +26,7 @@ const llmConfigs = { } }; -// Function to validate required fields in the request body +// Validar campos requeridos function validateRequiredFields(req, requiredFields) { for (const field of requiredFields) { if (!(field in req.body)) { @@ -42,39 +35,46 @@ function validateRequiredFields(req, requiredFields) { } } -// Generic function to send questions to LLM -async function sendQuestionToLLM(question, apiKey, model = 'gemini') { +// Función genérica para enviar preguntas al LLM +async function sendQuestionToLLM(question, apiKey, moderation) { try { - const config = llmConfigs[model]; + const config = llmConfigs["empathy"]; if (!config) { - throw new Error(`Model "${model}" is not supported.`); + throw new Error(`Model is not supported.`); } - const url = config.url(apiKey); - const requestData = config.transformRequest(question); + const url = config.url(); + const requestData = config.transformRequest(question, moderation); - const headers = { - 'Content-Type': 'application/json', - ...(config.headers ? 
config.headers(apiKey) : {}) - }; + const headers = config.headers(apiKey); const response = await axios.post(url, requestData, { headers }); return config.transformResponse(response); } catch (error) { - console.error(`Error sending question to ${model}:`, error.message || error); - return null; + console.error(`Error sending question:`, error.message || error); + return "Error processing request."; } } +// Ruta para configurar el prompt del asistente +app.post('/configureAssistant', async (req, res) => { + if (!req.body.moderation) { + return res.status(400).json({ error: "Missing moderation prompt" }); + } + moderation = req.body.moderation; + res.json({ message: "Moderation prompt updated" }); +}); + +// Ruta para enviar una pregunta app.post('/ask', async (req, res) => { try { - // Check if required fields are present in the request body - validateRequiredFields(req, ['question', 'model', 'apiKey']); + validateRequiredFields(req, ['question', 'apiKey']); + + const { question, apiKey } = req.body; + const answer = await sendQuestionToLLM(question, apiKey, moderation); - const { question, model, apiKey } = req.body; - const answer = await sendQuestionToLLM(question, apiKey, model); res.json({ answer }); } catch (error) { @@ -86,6 +86,4 @@ const server = app.listen(port, () => { console.log(`LLM Service listening at http://localhost:${port}`); }); -module.exports = server - - +module.exports = server; \ No newline at end of file From f9c84993ad2d7b6b538c75519ed04645f41d7a70 Mon Sep 17 00:00:00 2001 From: Iyansr97 Date: Sun, 9 Mar 2025 13:05:32 +0100 Subject: [PATCH 2/3] Configured secure use of the API key in the LLM service through the use of a .env file --- docker-compose.yml | 2 ++ llmservice/llm-service.js | 24 ++++++++++++++++++------ llmservice/package-lock.json | 13 +++++++++++++ llmservice/package.json | 17 +++++++++-------- package-lock.json | 24 ++++++++++++++++++++++++ package.json | 5 +++++ 6 files changed, 71 insertions(+), 14 deletions(-) create 
mode 100644 package-lock.json create mode 100644 package.json diff --git a/docker-compose.yml b/docker-compose.yml index f13409b1..f75ab39f 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -43,6 +43,8 @@ services: image: ghcr.io/arquisoft/wichat_es6a/llmservice:latest profiles: ["dev", "prod"] build: ./llmservice + env_file: + - ./llmservice/.env ports: - "8003:8003" networks: diff --git a/llmservice/llm-service.js b/llmservice/llm-service.js index f7f7fa77..66cbfc76 100644 --- a/llmservice/llm-service.js +++ b/llmservice/llm-service.js @@ -3,9 +3,20 @@ const express = require('express'); const app = express(); const port = 8003; -let moderation = "You are a helpful assistant."; +let moderation = "You are a quiz game assistant."; +require('dotenv').config(); // Cargar las variables de entorno desde .env -app.use(express.json()); // Middleware para parsear JSON +// Middleware para parsear JSON +app.use(express.json()); + +// Agregar apiKey automáticamente en la solicitud si no está presente +app.use((req, res, next) => { + // Verificar si no se incluye apiKey en el cuerpo de la solicitud + if (!req.body.apiKey) { + req.body.apiKey = process.env.LLM_API_KEY; // Usar la API Key desde las variables de entorno + } + next(); +}); const llmConfigs = { empathy: { @@ -45,7 +56,6 @@ async function sendQuestionToLLM(question, apiKey, moderation) { const url = config.url(); const requestData = config.transformRequest(question, moderation); - const headers = config.headers(apiKey); const response = await axios.post(url, requestData, { headers }); @@ -70,9 +80,9 @@ app.post('/configureAssistant', async (req, res) => { // Ruta para enviar una pregunta app.post('/ask', async (req, res) => { try { - validateRequiredFields(req, ['question', 'apiKey']); + validateRequiredFields(req, ['question']); - const { question, apiKey } = req.body; + const { question, apiKey } = req.body; // La apiKey ya ha sido añadida automáticamente const answer = await sendQuestionToLLM(question, 
apiKey, moderation); res.json({ answer }); @@ -82,8 +92,10 @@ app.post('/ask', async (req, res) => { } }); + + const server = app.listen(port, () => { console.log(`LLM Service listening at http://localhost:${port}`); }); -module.exports = server; \ No newline at end of file +module.exports = server; diff --git a/llmservice/package-lock.json b/llmservice/package-lock.json index c801c4a7..f88e73de 100644 --- a/llmservice/package-lock.json +++ b/llmservice/package-lock.json @@ -10,6 +10,7 @@ "license": "ISC", "dependencies": { "axios": "^1.7.9", + "dotenv": "^16.4.7", "express": "^4.21.2" }, "devDependencies": { @@ -1803,6 +1804,18 @@ "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, + "node_modules/dotenv": { + "version": "16.4.7", + "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.4.7.tgz", + "integrity": "sha512-47qPchRCykZC03FhkYAhrvwU4xDBFIj1QPqaarj6mdM/hgUzfPHcpkHJOn3mJAufFeeAxAzeGsr5X0M4k6fLZQ==", + "license": "BSD-2-Clause", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://dotenvx.com" + } + }, "node_modules/dunder-proto": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", diff --git a/llmservice/package.json b/llmservice/package.json index 9d5aa058..52aace12 100644 --- a/llmservice/package.json +++ b/llmservice/package.json @@ -9,12 +9,13 @@ "license": "ISC", "description": "", "homepage": "https://github.com/arquisoft/wichat_es6a#readme", - "dependencies": { - "axios": "^1.7.9", - "express": "^4.21.2" - }, - "devDependencies": { - "jest": "^29.7.0", - "supertest": "^7.0.0" - } + "dependencies": { + "axios": "^1.7.9", + "dotenv": "^16.4.7", + "express": "^4.21.2" + }, + "devDependencies": { + "jest": "^29.7.0", + "supertest": "^7.0.0" + } } diff --git a/package-lock.json b/package-lock.json new file mode 100644 index 00000000..163a4de4 --- /dev/null +++ b/package-lock.json @@ -0,0 +1,24 @@ +{ + "name": "wichat_es6a", + "lockfileVersion": 3, + "requires": true, + "packages": { + 
"": { + "dependencies": { + "dotenv": "^16.4.7" + } + }, + "node_modules/dotenv": { + "version": "16.4.7", + "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.4.7.tgz", + "integrity": "sha512-47qPchRCykZC03FhkYAhrvwU4xDBFIj1QPqaarj6mdM/hgUzfPHcpkHJOn3mJAufFeeAxAzeGsr5X0M4k6fLZQ==", + "license": "BSD-2-Clause", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://dotenvx.com" + } + } + } +} diff --git a/package.json b/package.json new file mode 100644 index 00000000..33cc0ace --- /dev/null +++ b/package.json @@ -0,0 +1,5 @@ +{ + "dependencies": { + "dotenv": "^16.4.7" + } +} From c6a9949da7733bf558a1648af203fe47d274962c Mon Sep 17 00:00:00 2001 From: Iyansr97 Date: Sun, 9 Mar 2025 13:07:51 +0100 Subject: [PATCH 3/3] Created service to generate questions and answers in JSON format from a given context using the AI API. --- llmservice/llm-service.js | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/llmservice/llm-service.js b/llmservice/llm-service.js index 66cbfc76..4bbae851 100644 --- a/llmservice/llm-service.js +++ b/llmservice/llm-service.js @@ -92,7 +92,43 @@ app.post('/ask', async (req, res) => { } }); +// Servicio 1: Generación de preguntas y respuestas a partir del contexto +app.post('/generateQuestions', async (req, res) => { + try { + if (!req.body.context) { + return res.status(400).json({ error: "Missing context" }); + } + + const context = req.body.context; + + const prompt = `A partir del siguiente texto, genera 4 preguntas de opción múltiple. 
+ Cada pregunta debe tener 4 respuestas, una correcta y tres incorrectas: + + Texto: "${context}" + + Responde en formato JSON, la respuesta debe incluir UNICAMENTE el formato JSON con las preguntas y respuestas: + { + "questions": [ + { + "question": "Pregunta 1", + "answers": [ + { "text": "Respuesta correcta", "correct": true }, + { "text": "Respuesta incorrecta 1", "correct": false }, + { "text": "Respuesta incorrecta 2", "correct": false }, + { "text": "Respuesta incorrecta 3", "correct": false } + ] + }, + ... + ] + }`; + const response = await sendQuestionToLLM(prompt, req.body.apiKey, moderation); + console.log("Response:", response); + res.json(response); + } catch (error) { + res.status(500).json({ error: "Failed to generate questions" }); + } +}); const server = app.listen(port, () => { console.log(`LLM Service listening at http://localhost:${port}`);