Skip to content

Commit

Permalink
Merge pull request #1365 from mito-ds/mito-ai-lambda-function
Browse files Browse the repository at this point in the history
Mito AI Server
  • Loading branch information
aarondr77 authored Nov 18, 2024
2 parents 64645f4 + 756d523 commit b14eb6e
Show file tree
Hide file tree
Showing 6 changed files with 104 additions and 37 deletions.
3 changes: 2 additions & 1 deletion .github/workflows/test-mito-ai.yml
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ jobs:
strategy:
matrix:
python-version: ['3.8', '3.10', '3.11']
use-mito-ai-server: [true, false]
fail-fast: false

steps:
Expand Down Expand Up @@ -54,7 +55,7 @@ jobs:
jupyter lab --config jupyter_server_test_config.py &
npm run test:mitoai
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
OPENAI_API_KEY: ${{ matrix.use-mito-ai-server && '' || secrets.OPENAI_API_KEY }}
- name: Upload test-results
uses: actions/upload-artifact@v3
if: failure()
Expand Down
31 changes: 3 additions & 28 deletions mito-ai/mito-ai/OpenAICompletionHandler.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,7 @@
import os
import openai
from jupyter_server.base.handlers import APIHandler
from tornado import web
import json
from openai import OpenAI
from .utils.open_ai_utils import get_open_ai_completion


# This handler is responsible for the mito_ai/completion endpoint.
Expand All @@ -17,33 +15,10 @@ def post(self):
data = self.get_json_body()
messages = data.get('messages', '')

# Get the OpenAI API key from environment variables
openai_api_key = os.getenv('OPENAI_API_KEY')
if not openai_api_key:
# If the API key is not set, return a 401 unauthorized error
self.set_status(401)
self.finish(json.dumps({"response": "OPENAI_API_KEY not set"}))
return

# Set up the OpenAI client
openai.api_key = openai_api_key
client = OpenAI()

try:
# Query OpenAI API
response = client.chat.completions.create(
model="gpt-4o-mini",
messages=messages
)

response_dict = response.to_dict()

# Send the response back to the frontend
# TODO: In the future, instead of returning the raw response,
# return a cleaned up version of the response so we can support
# multiple models

self.finish(json.dumps(response_dict))
response = get_open_ai_completion(messages)
self.finish(json.dumps(response))
except Exception as e:
self.set_status(500)
self.finish(json.dumps({"response": f"Error: {str(e)}"}))
Empty file.
83 changes: 83 additions & 0 deletions mito-ai/mito-ai/utils/open_ai_utils.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,83 @@
#!/usr/bin/env python
# coding: utf-8

# Copyright (c) Saga Inc.

import os
import requests
from typing import Any, Dict, List

# Endpoint for OpenAI's chat-completions REST API (used when the user has a key).
OPEN_AI_URL = 'https://api.openai.com/v1/chat/completions'
# Mito's AWS Lambda proxy that performs the completion on the user's behalf
# when no OPENAI_API_KEY is configured.
MITO_AI_URL = 'https://ogtzairktg.execute-api.us-east-1.amazonaws.com/Prod/completions/'

# Cap on free completions for open-source users.
# NOTE(review): not referenced anywhere in this file — presumably enforced
# elsewhere (e.g. server-side); confirm before relying on it.
OPEN_SOURCE_AI_COMPLETIONS_LIMIT = 100

def _get_ai_completion_data(messages: List[Dict[str, Any]]) -> Dict[str, Any]:
return {
"model": "gpt-4o-mini",
"messages": messages,
"temperature": 0,
}

# User identity sent along with Mito-server requests.
# NOTE(review): these are never assigned anywhere in this module, so every
# request currently sends None for all three — presumably they are meant to
# be populated from user config elsewhere, or this is a TODO; confirm.
__user_email = None
__user_id = None
__num_usages = None

def _get_ai_completion_from_mito_server(ai_completion_data: Dict[str, Any]) -> Dict[str, Any]:
    """Request an AI completion from the Mito AI Lambda proxy.

    Used as the fallback when the user has no OPENAI_API_KEY set. Posts the
    user identifiers plus the completion request to MITO_AI_URL.

    Args:
        ai_completion_data: payload built by _get_ai_completion_data.

    Returns:
        The server's JSON body. Per the Lambda's contract this is a dict
        with a 'completion' entry, and it is returned as-is.

    Raises:
        requests.HTTPError: if the server responds with a 4xx/5xx status.
        requests.RequestException: on connection failure or timeout.
    """
    data = {
        'email': __user_email,      # NOTE(review): never set in this module — always None; confirm intended
        'user_id': __user_id,
        'data': ai_completion_data
    }

    headers = {
        'Content-Type': 'application/json',
    }

    try:
        # Fix: the original call had no timeout, so an unreachable Lambda
        # endpoint would hang the Jupyter request handler indefinitely.
        res = requests.post(MITO_AI_URL, headers=headers, json=data, timeout=30)

        # If the response status code is in the 200s, this does nothing.
        # If the response status code indicates an error (4xx or 5xx),
        # raise an HTTPError exception with details about what went wrong.
        res.raise_for_status()

        # The lambda function returns a dictionary with a completion entry in it,
        # so we just return that.
        return res.json()

    except Exception as e:
        # Best-effort diagnostics, then propagate with the ORIGINAL traceback:
        # bare `raise` instead of `raise e`, which would rewrite the traceback
        # to point here rather than at the failing call.
        print('Error using mito server', e)
        raise


def get_open_ai_completion(messages: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Get a chat completion, from OpenAI directly or via the Mito server.

    If the OPENAI_API_KEY environment variable is set, the user's own key is
    used against the OpenAI API; otherwise the request is proxied through the
    Mito AI server so the user can try the feature without a key.

    Args:
        messages: OpenAI-style chat messages (list of role/content dicts).

    Returns:
        {'completion': <assistant message content>} — both code paths return
        this shape (the Mito server already responds with a 'completion' entry).

    Raises:
        requests.HTTPError: on a 4xx/5xx response from either backend.
        requests.RequestException: on connection failure or timeout.
    """
    openai_api_key = os.environ.get('OPENAI_API_KEY')
    ai_completion_data = _get_ai_completion_data(messages)

    if openai_api_key is None:
        # No user key: proxy the request through the Mito server instead.
        return _get_ai_completion_from_mito_server(ai_completion_data)

    headers = {
        'Content-Type': 'application/json',
        'Authorization': f'Bearer {openai_api_key}'
    }

    # Fix: the original call had no timeout, so a stalled OpenAI endpoint
    # would hang the Jupyter request handler indefinitely. The original
    # `try/except Exception as e: raise e` wrapper was a no-op (it only
    # rewrote the traceback) and has been removed.
    res = requests.post(OPEN_AI_URL, headers=headers, json=ai_completion_data, timeout=30)

    # If the response status code is in the 200s, this does nothing.
    # If it indicates an error (4xx or 5xx), raise an HTTPError with details.
    res.raise_for_status()

    # Normalize the raw OpenAI response down to just the assistant's text.
    completion = res.json()['choices'][0]['message']['content']
    return {'completion': completion}

5 changes: 2 additions & 3 deletions mito-ai/src/Extensions/AiChat/ChatTaskpane.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -165,12 +165,11 @@ const ChatTaskpane: React.FC<IChatTaskpaneProps> = ({
});

if (apiResponse.type === 'success') {

const response = apiResponse.response;
const aiMessage = response.choices[0].message;
const aiMessage = apiResponse.response;

newChatHistoryManager.addAIMessageFromResponse(aiMessage);
setChatHistoryManager(newChatHistoryManager);

aiRespone = aiMessage
} else {
newChatHistoryManager.addAIMessageFromMessageContent(apiResponse.errorMessage, true)
Expand Down
19 changes: 14 additions & 5 deletions mito-ai/src/utils/handler.ts
Original file line number Diff line number Diff line change
@@ -1,11 +1,11 @@
import { URLExt } from '@jupyterlab/coreutils';
import { ServerConnection } from '@jupyterlab/services';
import OpenAI from 'openai';
import OpenAI from "openai";


export type SuccessfulAPIResponse = {
'type': 'success',
response: OpenAI.Chat.ChatCompletion
response: OpenAI.Chat.Completions.ChatCompletionMessage
}
export type FailedAPIResponse = {
type: 'error',
Expand Down Expand Up @@ -41,8 +41,8 @@ export async function requestAPI(

// Merge default headers with any provided headers
init.headers = {
...defaultHeaders,
...init.headers,
...defaultHeaders,
...init.headers,
};

// Make the request
Expand Down Expand Up @@ -83,9 +83,18 @@ export async function requestAPI(

try {
data = JSON.parse(data);

// TODO: Update the lambda function to return the entire message instead of
// just the content so we don't have to recreate the message here.
const aiMessage: OpenAI.Chat.Completions.ChatCompletionMessage = {
role: 'assistant',
content: data['completion'],
refusal: null
}

return {
type: 'success',
response: data
response: aiMessage
}
} catch (error) {
console.error('Not a JSON response body.', response);
Expand Down

0 comments on commit b14eb6e

Please sign in to comment.