Skip to content

Commit

Permalink
Merge pull request #2 from romantech/faeture/langchain-update
Browse files Browse the repository at this point in the history
  • Loading branch information
romantech committed Apr 6, 2024
2 parents d077c08 + 4943ab4 commit 364e61c
Show file tree
Hide file tree
Showing 15 changed files with 758 additions and 819 deletions.
13 changes: 10 additions & 3 deletions .env.example
Original file line number Diff line number Diff line change
@@ -1,11 +1,18 @@
# Deployment Environment Ports
PORT=3001
NODE_ENV=development # koyeb 환경 변수 설정이랑 맞추기 위해 추가
# Added to match koyeb environment variable settings
NODE_ENV=development
# Specify which origins to allow, separated by commas, e.g. https://google.com,http://localhost:3000
CORS_ORIGIN=

OPENAI_API_KEY=

REDIS_HOST=
REDIS_PORT=
REDIS_USERNAME=
REDIS_PASSWORD=
CORS_ORIGIN=
FINE_TUNED_ID=

MODEL_GPT_3_5_FT=gpt-3.5-turbo
MODEL_GPT_3_5=gpt-3.5-turbo
MODEL_GPT_4=gpt-4

14 changes: 8 additions & 6 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
"name": "project-server",
"version": "1.0.0",
"description": "Personal project server that provides a RESTful API",
"packageManager": "[email protected]",
"main": "server.ts",
"_moduleAliases": {
"@": "dist"
Expand All @@ -20,6 +21,7 @@
"author": "Romantech",
"license": "ISC",
"dependencies": {
"@langchain/openai": "^0.0.26",
"@types/request-ip": "^0.0.41",
"compression": "^1.7.4",
"cookie-parser": "^1.4.6",
Expand All @@ -28,14 +30,14 @@
"express-validator": "^7.0.1",
"helmet": "^7.1.0",
"ioredis": "^5.3.2",
"langchain": "^0.0.209",
"langchain": "^0.1.31",
"module-alias": "^2.2.3",
"morgan": "^1.10.0",
"node-schedule": "^2.1.1",
"openai": "^4.23.0",
"openai": "^4.33.0",
"request-ip": "^3.3.0",
"winston": "^3.11.0",
"winston-daily-rotate-file": "^4.7.1"
"winston-daily-rotate-file": "^5.0.0"
},
"devDependencies": {
"@types/compression": "^1.7.5",
Expand All @@ -45,10 +47,10 @@
"@types/express-serve-static-core": "^4.17.41",
"@types/module-alias": "^2.0.4",
"@types/morgan": "^1.9.9",
"@types/node": "^20.10.5",
"@types/node": "^20.12.5",
"@types/node-schedule": "^2.1.5",
"@typescript-eslint/eslint-plugin": "^6.15.0",
"@typescript-eslint/parser": "^6.15.0",
"@typescript-eslint/eslint-plugin": "^7.5.0",
"@typescript-eslint/parser": "^7.5.0",
"dotenv": "^16.3.1",
"eslint": "^8.56.0",
"eslint-config-prettier": "^9.1.0",
Expand Down
6 changes: 3 additions & 3 deletions src/config/createServer.ts
Original file line number Diff line number Diff line change
Expand Up @@ -4,12 +4,12 @@ import morgan from 'morgan';
import compression from 'compression';
import cookieParser from 'cookie-parser';
import cors, { CorsOptions } from 'cors';
import { CORS_ORIGIN, isProd } from '@/config/environment';
import { envConfig } from '@/config/environment';

const morganFormat = isProd() ? 'combined' : 'dev';
const morganFormat = envConfig.isProd ? 'combined' : 'dev';
const corsOptions: CorsOptions = {
/** Access-Control-Allow-Origin 응답 헤더 설정 (허용할 오리진 목록) */
origin: CORS_ORIGIN,
origin: envConfig.corsOrigins,
/** Access-Control-Allow-Methods 응답 헤더 설정 (허용할 HTTP 메서드 목록) */
methods: ['GET', 'POST'],
/** Access-Control-Allow-Headers 응답 헤더 설정 (허용할 요청 헤더 목록) */
Expand Down
56 changes: 39 additions & 17 deletions src/config/environment.ts
Original file line number Diff line number Diff line change
@@ -1,28 +1,50 @@
/**
* 웹스톰에서 alias 설정을 위해 tsconfig.json 파일의 baseUrl 속성을 설정하면
* process.env 참조시 해결되지 않은 변수로 나오는 문제 있음
* import { env } from 'node:process' 구문으로 임포트해서 사용해서 임시 해결
* */
import { env } from 'node:process';

export enum EnvVars {
export enum RequiredEnv { // Required Environment Variables
PORT = 'PORT',
NODE_ENV = 'NODE_ENV',
OPENAI_API_KEY = 'OPENAI_API_KEY',
}

export enum OptionalEnv { // Optional Environment Variables
CORS_ORIGIN = 'CORS_ORIGIN',

REDIS_HOST = 'REDIS_HOST',
REDIS_PORT = 'REDIS_PORT',
REDIS_PASSWORD = 'REDIS_PASSWORD',
REDIS_USERNAME = 'REDIS_USERNAME',
CORS_ORIGIN = 'CORS_ORIGIN',
FINE_TUNED_ID = 'FINE_TUNED_ID',

MODEL_GPT_3_5_FT = 'MODEL_GPT_3_5_FT',
MODEL_GPT_3_5 = 'MODEL_GPT_3_5',
MODEL_GPT_4 = 'MODEL_GPT_4',
}

/**
* 웹스톰에서 alias 설정을 위해 tsconfig.json 파일의 baseUrl 속성을 설정하면
* process.env 참조시 해결되지 않은 변수로 나오는 문제 있음
* import { env } from 'node:process' 구문으로 임포트해서 사용해서 임시 해결
* */
export const isProd = () => env[EnvVars.NODE_ENV] === 'production';
export const PORT = isProd() ? env[EnvVars.PORT] : 3001;
export const REDIS_HOST = env[EnvVars.REDIS_HOST];
export const REDIS_PORT = Number(env[EnvVars.REDIS_PORT]);
export const REDIS_PASSWORD = env[EnvVars.REDIS_PASSWORD];
export const REDIS_USERNAME = env[EnvVars.REDIS_USERNAME];
export const OPENAI_API_KEY = env[EnvVars.OPENAI_API_KEY];
export const CORS_ORIGIN = env[EnvVars.CORS_ORIGIN]?.split(',') ?? [];
export const FINE_TUNED_ID = env[EnvVars.FINE_TUNED_ID];
const loadEnvironment = () => {
const isProd = env[RequiredEnv.NODE_ENV] === 'production';
const DEFAULT_SERVER_PORT = 3001;

return {
isProd,
port: isProd ? Number(env[RequiredEnv.PORT]) : DEFAULT_SERVER_PORT,
corsOrigins: env[OptionalEnv.CORS_ORIGIN]?.split(',') ?? [],
openAIKey: env[RequiredEnv.OPENAI_API_KEY],
redis: {
host: env[OptionalEnv.REDIS_HOST],
port: Number(env[OptionalEnv.REDIS_PORT]),
password: env[OptionalEnv.REDIS_PASSWORD],
username: env[OptionalEnv.REDIS_USERNAME],
},
modelNames: {
gpt_3_5_FT: env[OptionalEnv.MODEL_GPT_3_5_FT] ?? 'gpt-3.5-turbo',
gpt_3_5: env[OptionalEnv.MODEL_GPT_3_5] ?? 'gpt-3.5-turbo',
gpt_4: env[OptionalEnv.MODEL_GPT_4] ?? 'gpt-4',
},
} as const;
};

export const envConfig = loadEnvironment();
24 changes: 19 additions & 5 deletions src/config/logger.ts
Original file line number Diff line number Diff line change
Expand Up @@ -12,12 +12,26 @@ const DATE_FORMAT = 'YYYY-MM-DD HH:mm:ss';
const ignorePrivate = format((info) => (info.private ? false : info));

/** Custom logging format */
const customFormat = printf(({ level, message, timestamp, ...meta }) => {
let result = `[${level}] ${timestamp}: ${message}`;
if (Object.keys(meta).length) {
result += ` ${JSON.stringify(meta)}`;
const formatStackTrace = (stack: string, limit = 3) => {
const [errorMessage, ...restLines] = stack.split('\n');
return [
errorMessage.replace(': ', '-'),
...restLines.slice(0, limit).map((line) => `-> ${line}`),
].join('\n');
};

const customFormat = printf(({ level, message, timestamp, stack, ...meta }) => {
let logMessage = `[${timestamp}] [${level}]: ${message}`;

if (stack?.trim()) logMessage += `\nStack: ${formatStackTrace(stack)}`;

const metaKeys = Object.keys(meta);
if (metaKeys.length > 0) {
const metaInfo = JSON.stringify(meta, null, 2);
logMessage += `\nMeta: ${metaInfo}`;
}
return result;

return logMessage;
});

/** Transform log level to uppercase */
Expand Down
6 changes: 3 additions & 3 deletions src/constants/analyzer.ts
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
import { GPTModel } from '@/services';
import { AIModelKey } from '@/services';

export const ANALYSIS_DECREMENT_COUNT = {
[GPTModel.GPT_3]: 1,
[GPTModel.GPT_4]: 5,
[AIModelKey.GPT_3_5_FT]: 1,
[AIModelKey.GPT_4]: 5,
};

export enum RandomSentenceParam {
Expand Down
29 changes: 18 additions & 11 deletions src/controllers/analyzer/createAnalysis.ts
Original file line number Diff line number Diff line change
@@ -1,19 +1,20 @@
import { ANALYSIS_DECREMENT_COUNT, ERROR_MESSAGES } from '@/constants';
import { asyncHandler, throwCustomError } from '@/utils';
import {
ANALYSIS_MODEL_OPTIONS,
AIModelKey,
ANALYSIS_MODEL_OPTION,
AnalysisModel,
ANALYZER_REDIS_SCHEMA,
decrementRedisCounters,
GPTModel,
redis,
validateAndRepairJSON,
} from '@/services';
import { checkModelField, checkSentenceField } from '@/validators';
import { handleValidationErrors, validateAnalysisCount } from '@/middlewares';
import { HumanMessage, SystemMessage } from 'langchain/schema';
import { ChatOpenAI } from 'langchain/chat_models/openai';
import { HumanMessage, SystemMessage } from '@langchain/core/messages';
import { ChatOpenAI, type ChatOpenAICallOptions } from '@langchain/openai';

type RequestBody = { sentence: string[]; model: GPTModel };
type RequestBody = { sentence: string[]; model: AnalysisModel };

const { RETRIEVE_FAILED, GENERATE_FAILED } = ERROR_MESSAGES;
const { KEYS, FIELDS } = ANALYZER_REDIS_SCHEMA;
Expand All @@ -39,22 +40,28 @@ export const createAnalysis = [
}),
];

const retrieveAnalysisPrompt = async (model: GPTModel) => {
const { promptField } = ANALYSIS_MODEL_OPTIONS[model];
const retrieveAnalysisPrompt = async (model: AnalysisModel) => {
const { promptField } = ANALYSIS_MODEL_OPTION[model];
const prompt = await redis.hget(KEYS.PROMPT, promptField);

if (!prompt) return throwCustomError(RETRIEVE_FAILED('prompt'), 500);

return prompt;
};

const executeAnalysis = async (sentence: string, model: GPTModel) => {
const { temperature, modelName } = ANALYSIS_MODEL_OPTIONS[model];
const executeAnalysis = async (sentence: string, model: AnalysisModel) => {
const isGPT4 = model === AIModelKey.GPT_4;

const { temperature, modelName } = ANALYSIS_MODEL_OPTION[model];
const prompt = await retrieveAnalysisPrompt(model);
const messages = [new SystemMessage(prompt), new HumanMessage(sentence)];

const chat = new ChatOpenAI({ modelName, temperature });
const { content } = await chat.call(messages);
const callOptions: ChatOpenAICallOptions = {};
// The json_object response_format is only supported by GPT-4-Turbo
if (isGPT4) callOptions.response_format = { type: 'json_object' };

const chat = new ChatOpenAI({ modelName, temperature }).bind(callOptions);
const { content } = await chat.invoke(messages);

if (!content) return throwCustomError(GENERATE_FAILED('analysis'), 500);
return await validateAndRepairJSON(`${content}`);
Expand Down
14 changes: 9 additions & 5 deletions src/controllers/analyzer/getRandomSentences.ts
Original file line number Diff line number Diff line change
@@ -1,9 +1,10 @@
import { asyncHandler, throwCustomError } from '@/utils';
import { ParamsDictionary } from 'express-serve-static-core';
import {
AI_MODEL,
AIModelKey,
ANALYZER_REDIS_SCHEMA,
decrementRedisCounters,
GPTModel,
redis,
} from '@/services';
import { ERROR_MESSAGES, RandomSentenceParams } from '@/constants';
Expand All @@ -13,8 +14,8 @@ import {
checkTopicsField,
} from '@/validators';
import { handleValidationErrors, validateAnalysisCount } from '@/middlewares';
import { PromptTemplate } from 'langchain/prompts';
import { OpenAI } from 'langchain/llms/openai';
import { PromptTemplate } from '@langchain/core/prompts';
import { OpenAI } from '@langchain/openai';

const { RETRIEVE_FAILED, GENERATE_FAILED } = ERROR_MESSAGES;
const { KEYS, FIELDS } = ANALYZER_REDIS_SCHEMA;
Expand Down Expand Up @@ -55,9 +56,12 @@ const retrieveRandomSentencePrompt = async (query: RandomSentenceParams) => {

const generateRandomSentences = async (query: RandomSentenceParams) => {
const prompt = await retrieveRandomSentencePrompt(query);
const llm = new OpenAI({ temperature: 1, modelName: GPTModel.GPT_3 });
const llm = new OpenAI({
temperature: 1,
modelName: AI_MODEL[AIModelKey.GPT_3_5],
});

const sentences = await llm.predict(prompt);
const sentences = await llm.invoke(prompt);
if (!sentences) return throwCustomError(GENERATE_FAILED('sentence'), 500);

return sentences;
Expand Down
6 changes: 3 additions & 3 deletions src/server.ts
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
import { setupRoutes } from '@/routes';
import { createServer, logger, PORT } from '@/config';
import { createServer, envConfig, logger } from '@/config';
import { initRedisKeys, redis } from '@/services';
import { errorHandler, notFoundHandler } from '@/middlewares';
import { initSchedulers } from '@/schedulers';
Expand Down Expand Up @@ -31,8 +31,8 @@ const initServer = async () => {
app.use(notFoundHandler);
app.use(errorHandler);

app.listen(PORT, () => {
logger.info(`Server is running on port ${PORT}`);
app.listen(envConfig.port, () => {
logger.info(`Server is running on port ${envConfig.port}`);
});
};

Expand Down
28 changes: 18 additions & 10 deletions src/services/openAI/openAI.ts
Original file line number Diff line number Diff line change
@@ -1,26 +1,34 @@
import { FINE_TUNED_ID } from '@/config';
import { envConfig } from '@/config';
import { ANALYZER_REDIS_SCHEMA } from '@/services/redis';

const { FIELDS } = ANALYZER_REDIS_SCHEMA;

const FINE_TUNE_SUFFIX = '_fine_tuned';

export enum GPTModel {
GPT_3 = 'gpt-3.5-turbo',
export enum AIModelKey {
GPT_3_5 = 'gpt-3.5',
GPT_3_5_FT = 'gpt-3.5-ft',
GPT_4 = 'gpt-4',
}

export const GPT_MODEL_LIST = Object.values(GPTModel);
export const AI_MODEL = {
[AIModelKey.GPT_3_5]: envConfig.modelNames.gpt_3_5,
[AIModelKey.GPT_3_5_FT]: envConfig.modelNames.gpt_3_5_FT,
[AIModelKey.GPT_4]: envConfig.modelNames.gpt_4,
};

export const ANALYSIS_MODEL_OPTIONS = {
[GPTModel.GPT_3]: {
export const ANALYSIS_MODEL_OPTION = {
[AIModelKey.GPT_3_5_FT]: {
promptField: FIELDS.ANALYSIS + FINE_TUNE_SUFFIX,
temperature: 0.6,
modelName: FINE_TUNED_ID ?? GPTModel.GPT_3,
modelName: AI_MODEL[AIModelKey.GPT_3_5_FT],
},
[GPTModel.GPT_4]: {
[AIModelKey.GPT_4]: {
promptField: FIELDS.ANALYSIS,
temperature: 0.4,
modelName: GPTModel.GPT_4,
modelName: AI_MODEL[AIModelKey.GPT_4],
},
} as const;
};

export const ANALYSIS_MODEL = Object.keys(ANALYSIS_MODEL_OPTION);
export type AnalysisModel = keyof typeof ANALYSIS_MODEL_OPTION;
11 changes: 7 additions & 4 deletions src/services/openAI/openAIUtils.ts
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import { logger } from '@/config';
import { OpenAI } from 'langchain/llms/openai';
import { OpenAI } from '@langchain/openai';
import { throwCustomError } from '@/utils/customError';
import { GPTModel } from '@/services';
import { AI_MODEL, AIModelKey } from '@/services';

/**
* When using fine-tuned model, there's a recurring issue where unnecessary ']' and '}' are added.
Expand All @@ -21,10 +21,13 @@ const repairJSONManually = (jsonString: string, errorMessage: string) => {
const repairJSONWithOpenAI = async (jsonString: string) => {
logger.info('Trying to repair JSON using OpenAI');

const llm = new OpenAI({ temperature: 0, modelName: GPTModel.GPT_3 });
const llm = new OpenAI({
temperature: 0,
modelName: AI_MODEL[AIModelKey.GPT_3_5],
});
const prompt = `Fix JSON format and the results should be returned in JSON: ${jsonString}`;

const repaired = await llm.predict(prompt);
const repaired = await llm.invoke(prompt);
return JSON.parse(repaired);
};

Expand Down
Loading

0 comments on commit 364e61c

Please sign in to comment.