Skip to content

Commit

Permalink
šŸ“ (environment.ts): Update environment variables to match the changesā€¦
Browse files Browse the repository at this point in the history
… in the .env.example file
  • Loading branch information
romantech committed Apr 5, 2024
1 parent 44d9fde commit 8235c7c
Show file tree
Hide file tree
Showing 8 changed files with 76 additions and 37 deletions.
9 changes: 6 additions & 3 deletions .env.example
Original file line number Diff line number Diff line change
@@ -1,11 +1,14 @@
PORT=3001
NODE_ENV=development # koyeb 환경 변수 설정이랑 맞추기 위해 추가
CORS_ORIGIN=

OPENAI_API_KEY=

REDIS_HOST=
REDIS_PORT=
REDIS_USERNAME=
REDIS_PASSWORD=
CORS_ORIGIN=
FINE_TUNED_ID=


MODEL_NAME_GPT_3_5_FT=
MODEL_NAME_GPT_3_5=
MODEL_NAME_GPT_4=
20 changes: 15 additions & 5 deletions src/config/environment.ts
Original file line number Diff line number Diff line change
Expand Up @@ -3,13 +3,18 @@ import { env } from 'node:process';
export enum EnvVars {
PORT = 'PORT',
NODE_ENV = 'NODE_ENV',
CORS_ORIGIN = 'CORS_ORIGIN',

OPENAI_API_KEY = 'OPENAI_API_KEY',

REDIS_HOST = 'REDIS_HOST',
REDIS_PORT = 'REDIS_PORT',
REDIS_PASSWORD = 'REDIS_PASSWORD',
REDIS_USERNAME = 'REDIS_USERNAME',
CORS_ORIGIN = 'CORS_ORIGIN',
FINE_TUNED_ID = 'FINE_TUNED_ID',

MODEL_NAME_GPT_3_5_FT = 'MODEL_NAME_GPT_3_5_FT',
MODEL_NAME_GPT_3_5 = 'MODEL_NAME_GPT_3_5',
MODEL_NAME_GPT_4 = 'MODEL_NAME_GPT_4',
}

/**
Expand All @@ -19,10 +24,15 @@ export enum EnvVars {
* */
export const isProd = () => env[EnvVars.NODE_ENV] === 'production';
export const PORT = isProd() ? env[EnvVars.PORT] : 3001;
export const CORS_ORIGIN = env[EnvVars.CORS_ORIGIN]?.split(',') ?? [];

export const OPENAI_API_KEY = env[EnvVars.OPENAI_API_KEY];

export const REDIS_HOST = env[EnvVars.REDIS_HOST];
export const REDIS_PORT = Number(env[EnvVars.REDIS_PORT]);
export const REDIS_PASSWORD = env[EnvVars.REDIS_PASSWORD];
export const REDIS_USERNAME = env[EnvVars.REDIS_USERNAME];
export const OPENAI_API_KEY = env[EnvVars.OPENAI_API_KEY];
export const CORS_ORIGIN = env[EnvVars.CORS_ORIGIN]?.split(',') ?? [];
export const FINE_TUNED_ID = env[EnvVars.FINE_TUNED_ID];

export const MODEL_NAME_GPT_3_5_FT = env[EnvVars.MODEL_NAME_GPT_3_5_FT];
export const MODEL_NAME_GPT_3_5 = env[EnvVars.MODEL_NAME_GPT_3_5];
export const MODEL_NAME_GPT_4 = env[EnvVars.MODEL_NAME_GPT_4];
6 changes: 3 additions & 3 deletions src/constants/analyzer.ts
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
import { GPTModel } from '@/services';
import { AIModelKey } from '@/services';

export const ANALYSIS_DECREMENT_COUNT = {
[GPTModel.GPT_3]: 1,
[GPTModel.GPT_4]: 5,
[AIModelKey.GPT_3_5_FT]: 1,
[AIModelKey.GPT_4]: 5,
};

export enum RandomSentenceParam {
Expand Down
25 changes: 16 additions & 9 deletions src/controllers/analyzer/createAnalysis.ts
Original file line number Diff line number Diff line change
@@ -1,19 +1,20 @@
import { ANALYSIS_DECREMENT_COUNT, ERROR_MESSAGES } from '@/constants';
import { asyncHandler, throwCustomError } from '@/utils';
import {
ANALYSIS_MODEL_OPTIONS,
AIModelKey,
ANALYSIS_MODEL_OPTION,
AnalysisModel,
ANALYZER_REDIS_SCHEMA,
decrementRedisCounters,
GPTModel,
redis,
validateAndRepairJSON,
} from '@/services';
import { checkModelField, checkSentenceField } from '@/validators';
import { handleValidationErrors, validateAnalysisCount } from '@/middlewares';
import { HumanMessage, SystemMessage } from '@langchain/core/messages';
import { ChatOpenAI } from '@langchain/openai';
import { ChatOpenAI, type ChatOpenAICallOptions } from '@langchain/openai';

type RequestBody = { sentence: string[]; model: GPTModel };
type RequestBody = { sentence: string[]; model: AnalysisModel };

const { RETRIEVE_FAILED, GENERATE_FAILED } = ERROR_MESSAGES;
const { KEYS, FIELDS } = ANALYZER_REDIS_SCHEMA;
Expand All @@ -39,21 +40,27 @@ export const createAnalysis = [
}),
];

const retrieveAnalysisPrompt = async (model: GPTModel) => {
const { promptField } = ANALYSIS_MODEL_OPTIONS[model];
const retrieveAnalysisPrompt = async (model: AnalysisModel) => {
const { promptField } = ANALYSIS_MODEL_OPTION[model];
const prompt = await redis.hget(KEYS.PROMPT, promptField);

if (!prompt) return throwCustomError(RETRIEVE_FAILED('prompt'), 500);

return prompt;
};

const executeAnalysis = async (sentence: string, model: GPTModel) => {
const { temperature, modelName } = ANALYSIS_MODEL_OPTIONS[model];
const executeAnalysis = async (sentence: string, model: AnalysisModel) => {
const isGPT4 = model === AIModelKey.GPT_4;

const { temperature, modelName } = ANALYSIS_MODEL_OPTION[model];
const prompt = await retrieveAnalysisPrompt(model);
const messages = [new SystemMessage(prompt), new HumanMessage(sentence)];

const chat = new ChatOpenAI({ modelName, temperature });
const callOptions: ChatOpenAICallOptions = {};
// The json_object response_format is only supported by GPT-4 Turbo
if (isGPT4) callOptions.response_format = { type: 'json_object' };

const chat = new ChatOpenAI({ modelName, temperature }).bind(callOptions);
const { content } = await chat.invoke(messages);

if (!content) return throwCustomError(GENERATE_FAILED('analysis'), 500);
Expand Down
8 changes: 6 additions & 2 deletions src/controllers/analyzer/getRandomSentences.ts
Original file line number Diff line number Diff line change
@@ -1,9 +1,10 @@
import { asyncHandler, throwCustomError } from '@/utils';
import { ParamsDictionary } from 'express-serve-static-core';
import {
AI_MODEL,
AIModelKey,
ANALYZER_REDIS_SCHEMA,
decrementRedisCounters,
GPTModel,
redis,
} from '@/services';
import { ERROR_MESSAGES, RandomSentenceParams } from '@/constants';
Expand Down Expand Up @@ -55,7 +56,10 @@ const retrieveRandomSentencePrompt = async (query: RandomSentenceParams) => {

const generateRandomSentences = async (query: RandomSentenceParams) => {
const prompt = await retrieveRandomSentencePrompt(query);
const llm = new OpenAI({ temperature: 1, modelName: GPTModel.GPT_3 });
const llm = new OpenAI({
temperature: 1,
modelName: AI_MODEL[AIModelKey.GPT_3_5],
});

const sentences = await llm.invoke(prompt);
if (!sentences) return throwCustomError(GENERATE_FAILED('sentence'), 500);
Expand Down
32 changes: 22 additions & 10 deletions src/services/openAI/openAI.ts
Original file line number Diff line number Diff line change
@@ -1,26 +1,38 @@
import { FINE_TUNED_ID } from '@/config';
import {
MODEL_NAME_GPT_3_5,
MODEL_NAME_GPT_3_5_FT,
MODEL_NAME_GPT_4,
} from '@/config';
import { ANALYZER_REDIS_SCHEMA } from '@/services/redis';

const { FIELDS } = ANALYZER_REDIS_SCHEMA;

const FINE_TUNE_SUFFIX = '_fine_tuned';

export enum GPTModel {
GPT_3 = 'gpt-3.5-turbo',
export enum AIModelKey {
GPT_3_5 = 'gpt-3.5',
GPT_3_5_FT = 'gpt-3.5-ft',
GPT_4 = 'gpt-4',
}

export const GPT_MODEL_LIST = Object.values(GPTModel);
export const AI_MODEL = {
[AIModelKey.GPT_3_5]: MODEL_NAME_GPT_3_5,
[AIModelKey.GPT_3_5_FT]: MODEL_NAME_GPT_3_5_FT,
[AIModelKey.GPT_4]: MODEL_NAME_GPT_4,
};

export const ANALYSIS_MODEL_OPTIONS = {
[GPTModel.GPT_3]: {
export const ANALYSIS_MODEL_OPTION = {
[AIModelKey.GPT_3_5_FT]: {
promptField: FIELDS.ANALYSIS + FINE_TUNE_SUFFIX,
temperature: 0.6,
modelName: FINE_TUNED_ID ?? GPTModel.GPT_3,
modelName: AI_MODEL[AIModelKey.GPT_3_5_FT],
},
[GPTModel.GPT_4]: {
[AIModelKey.GPT_4]: {
promptField: FIELDS.ANALYSIS,
temperature: 0.4,
modelName: GPTModel.GPT_4,
modelName: AI_MODEL[AIModelKey.GPT_4],
},
} as const;
};

export const ANALYSIS_MODEL = Object.keys(ANALYSIS_MODEL_OPTION);
export type AnalysisModel = keyof typeof ANALYSIS_MODEL_OPTION;
7 changes: 5 additions & 2 deletions src/services/openAI/openAIUtils.ts
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import { logger } from '@/config';
import { OpenAI } from '@langchain/openai';
import { throwCustomError } from '@/utils/customError';
import { GPTModel } from '@/services';
import { AI_MODEL, AIModelKey } from '@/services';

/**
* When using fine-tuned model, there's a recurring issue where unnecessary ']' and '}' are added.
Expand All @@ -21,7 +21,10 @@ const repairJSONManually = (jsonString: string, errorMessage: string) => {
const repairJSONWithOpenAI = async (jsonString: string) => {
logger.info('Trying to repair JSON using OpenAI');

const llm = new OpenAI({ temperature: 0, modelName: GPTModel.GPT_3 });
const llm = new OpenAI({
temperature: 0,
modelName: AI_MODEL[AIModelKey.GPT_3_5],
});
const prompt = `Fix JSON format and the results should be returned in JSON: ${jsonString}`;

const repaired = await llm.invoke(prompt);
Expand Down
6 changes: 3 additions & 3 deletions src/validators/analyzer.ts
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
import { body, query } from 'express-validator';
import { ERROR_MESSAGES, RandomSentenceParam } from '@/constants';
import { GPT_MODEL_LIST } from '@/services';
import { ANALYSIS_MODEL } from '@/services';

const { MISSING_FIELD } = ERROR_MESSAGES;
const { SENT_COUNT, TOPICS, MAX_CHARS } = RandomSentenceParam;
Expand All @@ -12,9 +12,9 @@ export const checkSentenceField = body('sentence')

/** GET /analysis/random-sentence */
export const checkModelField = body('model')
.isIn(GPT_MODEL_LIST)
.isIn(ANALYSIS_MODEL)
.withMessage(
`Invalid model value. Allowed values are '${GPT_MODEL_LIST.join(', ')}'`,
`Invalid model value. Allowed values are '${ANALYSIS_MODEL.join(', ')}'`,
);

// ģƒģ„±ķ•  ģ˜ģ–“ ė¬øģž„ ź°Æģˆ˜ optional
Expand Down

0 comments on commit 8235c7c

Please sign in to comment.