Llm generate question service #84

Open · wants to merge 3 commits into master

Changes from all commits
2 changes: 2 additions & 0 deletions docker-compose.yml
@@ -43,6 +43,8 @@ services:
     image: ghcr.io/arquisoft/wichat_es6a/llmservice:latest
     profiles: ["dev", "prod"]
     build: ./llmservice
+    env_file:
+      - ./llmservice/.env
     ports:
       - "8003:8003"
     networks:
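For the new env_file entry to resolve, a llmservice/.env file has to exist in the repo. A minimal sketch of what it presumably contains (the variable name LLM_API_KEY is taken from the process.env.LLM_API_KEY lookup added in llm-service.js below; the value is a placeholder):

    LLM_API_KEY=your-empathy-api-key

Note that Compose refuses to start the service if the file referenced by env_file is missing, so this file should be created (and git-ignored) before running the dev or prod profiles.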
116 changes: 81 additions & 35 deletions llmservice/llm-service.js
@@ -3,37 +3,41 @@ const express = require('express');
 
 const app = express();
 const port = 8003;
+let moderation = "You are a quiz game assistant.";
+require('dotenv').config(); // Load environment variables from .env
 
-// Middleware to parse JSON in request body
-app.use(express.json());
+// Middleware to parse JSON
+app.use(express.json());
+
+// Automatically add the apiKey to the request if it is not present
+app.use((req, res, next) => {
+  // Check whether the request body is missing an apiKey
+  if (!req.body.apiKey) {
+    req.body.apiKey = process.env.LLM_API_KEY; // Use the API key from the environment variables
+  }
+  next();
+});
 
 // Define configurations for different LLM APIs
 const llmConfigs = {
   gemini: {
     url: (apiKey) => `https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=${apiKey}`,
     transformRequest: (question) => ({
       contents: [{ parts: [{ text: question }] }]
     }),
     transformResponse: (response) => response.data.candidates[0]?.content?.parts[0]?.text
   },
   empathy: {
-    url: () => 'https://empathyai.staging.empathy.co/v1/chat/completions',
-    transformRequest: (question) => ({
+    url: () => 'https://ai-challenge.empathy.ai/v1/chat/completions',
+    transformRequest: (question, moderation) => ({
       model: "qwen/Qwen2.5-Coder-7B-Instruct",
       stream: false, // stream=true is not directly supported with axios
       messages: [
-        { role: "system", content: "You are a helpful assistant." },
+        { role: "system", content: moderation },
        { role: "user", content: question }
       ]
     }),
-    transformResponse: (response) => response.data.choices[0]?.message?.content,
+    transformResponse: (response) => response.data.choices?.[0]?.message?.content || "No response",
     headers: (apiKey) => ({
       Authorization: `Bearer ${apiKey}`,
       'Content-Type': 'application/json'
     })
   }
 };
 
-// Function to validate required fields in the request body
+// Validate required fields
 function validateRequiredFields(req, requiredFields) {
   for (const field of requiredFields) {
     if (!(field in req.body)) {
@@ -42,50 +46,92 @@ function validateRequiredFields(req, requiredFields) {
   }
 }
 
-// Generic function to send questions to LLM
-async function sendQuestionToLLM(question, apiKey, model = 'gemini') {
+// Generic function to send questions to the LLM
+async function sendQuestionToLLM(question, apiKey, moderation) {
   try {
-    const config = llmConfigs[model];
+    const config = llmConfigs["empathy"];
     if (!config) {
-      throw new Error(`Model "${model}" is not supported.`);
+      throw new Error(`Model is not supported.`);
     }
 
-    const url = config.url(apiKey);
-    const requestData = config.transformRequest(question);
-
-    const headers = {
-      'Content-Type': 'application/json',
-      ...(config.headers ? config.headers(apiKey) : {})
-    };
+    const url = config.url();
+    const requestData = config.transformRequest(question, moderation);
+    const headers = config.headers(apiKey);
 
     const response = await axios.post(url, requestData, { headers });
 
     return config.transformResponse(response);
 
   } catch (error) {
-    console.error(`Error sending question to ${model}:`, error.message || error);
-    return null;
+    console.error(`Error sending question:`, error.message || error);
+    return "Error processing request.";
   }
 }
 
+// Route to configure the assistant's system prompt
+app.post('/configureAssistant', async (req, res) => {
+  if (!req.body.moderation) {
+    return res.status(400).json({ error: "Missing moderation prompt" });
+  }
+  moderation = req.body.moderation;
+  res.json({ message: "Moderation prompt updated" });
+});
+
+// Route to send a question
 app.post('/ask', async (req, res) => {
   try {
-    // Check if required fields are present in the request body
-    validateRequiredFields(req, ['question', 'model', 'apiKey']);
-
-    const { question, model, apiKey } = req.body;
-    const answer = await sendQuestionToLLM(question, apiKey, model);
+    validateRequiredFields(req, ['question']);
+
+    const { question, apiKey } = req.body; // The apiKey has already been added by the middleware
+    const answer = await sendQuestionToLLM(question, apiKey, moderation);
+
     res.json({ answer });
 
   } catch (error) {
     res.status(400).json({ error: error.message });
   }
 });
 
+// Service 1: generate questions and answers from a given context
+app.post('/generateQuestions', async (req, res) => {
+  try {
+    if (!req.body.context) {
+      return res.status(400).json({ error: "Missing context" });
+    }
+
+    const context = req.body.context;
+
+    // Prompt kept in Spanish: the generated quiz content is Spanish-language
+    const prompt = `A partir del siguiente texto, genera 4 preguntas de opción múltiple.
+    Cada pregunta debe tener 4 respuestas, una correcta y tres incorrectas:
+
+    Texto: "${context}"
+
+    Responde en formato JSON, la respuesta debe incluir UNICAMENTE el formato JSON con las preguntas y respuestas:
+    {
+      "questions": [
+        {
+          "question": "Pregunta 1",
+          "answers": [
+            { "text": "Respuesta correcta", "correct": true },
+            { "text": "Respuesta incorrecta 1", "correct": false },
+            { "text": "Respuesta incorrecta 2", "correct": false },
+            { "text": "Respuesta incorrecta 3", "correct": false }
+          ]
+        },
+        ...
+      ]
+    }`;
+
+    const response = await sendQuestionToLLM(prompt, req.body.apiKey, moderation);
+    console.log("Response:", response);
+    // The LLM's reply is forwarded verbatim: a JSON string, not a parsed object
+    res.json(response);
+  } catch (error) {
+    res.status(500).json({ error: "Failed to generate questions" });
+  }
+});
 
 const server = app.listen(port, () => {
   console.log(`LLM Service listening at http://localhost:${port}`);
 });
 
-module.exports = server
-
-
+module.exports = server;
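For reviewers trying the service out, a minimal client sketch against the three routes (plain axios, with the service running on localhost:8003; the moderation prompt and context string are made-up examples, and the apiKey field is omitted because the new middleware injects LLM_API_KEY from the environment). Since /generateQuestions forwards the model's raw text, the client still has to JSON.parse it:

    const axios = require('axios');

    async function demo() {
      // Update the system prompt used for every LLM call
      await axios.post('http://localhost:8003/configureAssistant', {
        moderation: "You are a quiz game assistant."
      });

      // Ask a free-form question; only 'question' is required now
      const { data: ask } = await axios.post('http://localhost:8003/ask', {
        question: "Give me a hint about the capital of France."
      });
      console.log(ask.answer);

      // Generate quiz questions from a context paragraph
      const { data: raw } = await axios.post('http://localhost:8003/generateQuestions', {
        context: "El Sistema Solar tiene ocho planetas que orbitan alrededor del Sol."
      });
      // The route returns the LLM's text verbatim, so parse it client-side
      const quiz = typeof raw === 'string' ? JSON.parse(raw) : raw;
      console.log(quiz.questions.length);
    }

    demo().catch(console.error);

The JSON.parse step is fragile if the model wraps its answer in markdown fences despite the prompt's instructions; stripping fences or parsing server-side in /generateQuestions might be worth considering.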
13 changes: 13 additions & 0 deletions llmservice/package-lock.json (generated file; diff not rendered)
17 changes: 9 additions & 8 deletions llmservice/package.json
@@ -9,12 +9,13 @@
   "license": "ISC",
   "description": "",
   "homepage": "https://github.com/arquisoft/wichat_es6a#readme",
-  "dependencies": {
-    "axios": "^1.7.9",
-    "express": "^4.21.2"
-  },
-  "devDependencies": {
-    "jest": "^29.7.0",
-    "supertest": "^7.0.0"
-  }
+  "dependencies": {
+    "axios": "^1.7.9",
+    "dotenv": "^16.4.7",
+    "express": "^4.21.2"
+  },
+  "devDependencies": {
+    "jest": "^29.7.0",
+    "supertest": "^7.0.0"
+  }
 }
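Since jest and supertest are already in devDependencies and llm-service.js exports its server, a test along these lines should slot into the existing setup (a sketch only; the file location and the axios mock are assumptions, not part of this PR):

    const request = require('supertest');
    jest.mock('axios'); // avoid real calls to the Empathy API

    const server = require('./llm-service');

    afterAll(() => server.close());

    describe('LLM service', () => {
      it('updates the moderation prompt', async () => {
        const res = await request(server)
          .post('/configureAssistant')
          .send({ moderation: "You are a quiz game assistant." });
        expect(res.status).toBe(200);
        expect(res.body.message).toBe("Moderation prompt updated");
      });

      it('rejects /ask when question is missing', async () => {
        const res = await request(server).post('/ask').send({});
        expect(res.status).toBe(400);
      });
    });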
24 changes: 24 additions & 0 deletions package-lock.json (generated file; diff not rendered)
5 changes: 5 additions & 0 deletions package.json
@@ -0,0 +1,5 @@
+{
+  "dependencies": {
+    "dotenv": "^16.4.7"
+  }
+}