Skip to content

Commit

Permalink
Merge pull request #693 from openchatai/feat/sync_cloud
Browse files Browse the repository at this point in the history
Feat/sync cloud
  • Loading branch information
codebanesr committed Mar 12, 2024
2 parents 054a8bf + 630cd0b commit 51f89fe
Show file tree
Hide file tree
Showing 95 changed files with 1,731 additions and 726 deletions.
2 changes: 1 addition & 1 deletion llm-server/.env.example
Original file line number Diff line number Diff line change
@@ -1 +1 @@
OPENAI_API_KEY=sk-xxxxxxxxxxxx#
OPENAI_API_KEY=sk-xxxxxxxxxxxx#
3 changes: 2 additions & 1 deletion llm-server/.gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -155,4 +155,5 @@ app.bin
app.build/
app.dist/
app.onefile-build/
.DS_Store
.DS_Store
.aider/
14 changes: 7 additions & 7 deletions llm-server/.vscode/launch.json
Original file line number Diff line number Diff line change
Expand Up @@ -2,23 +2,22 @@
"version": "0.2.0",
"configurations": [
{
"name": "Python Debugger: Flask",
"type": "debugpy",
"name": "Python: Flask",
"type": "python",
"request": "launch",
"module": "flask",
"env": {
"FLASK_APP": "app.py",
"FLASK_ENV": "development",
"FLASK_DEBUG": "1"
},
"args": [
"run",
"--no-debugger",
"--reload",
"--host=0.0.0.0",
"--no-reload",
"--port=8002"
],
"jinja": true,
"justMyCode": true
"jinja": true
},
{
"name": "Python: Celery Workers",
Expand All @@ -37,7 +36,8 @@
"DISABLE_SPRING": "true"
},
"console": "integratedTerminal",
"envFile": "${workspaceFolder}/.env"
"envFile": "${workspaceFolder}/.env",
"python": "/Users/shanurrahman/anaconda3/envs/local/bin/python"
}
]
}
2 changes: 1 addition & 1 deletion llm-server/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -14,4 +14,4 @@ CMD ["python", "-m", "debugpy", "--listen", "0.0.0.0:5678", "--wait-for-client",
# Production stage
FROM common AS production
EXPOSE 8002
CMD ["python", "-m", "flask", "run", "--host=0.0.0.0", "--port=8002", "--reload"]
CMD ["python", "-m", "flask", "run", "--host=0.0.0.0", "--port=8002", "--reload"]
107 changes: 79 additions & 28 deletions llm-server/app.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,9 @@
import asyncio
import requests
from dotenv import load_dotenv
from flask import Flask, request
from werkzeug.exceptions import MethodNotAllowed, HTTPException, NotFound
from flask import Flask, render_template, request
from flask import jsonify
from flask_jwt_extended import JWTManager
from utils.vector_store_setup import init_qdrant_collections
import traceback
from routes.action.action_controller import action
Expand All @@ -18,57 +19,85 @@
)

from routes.uploads.upload_controller import upload_controller
from shared.models.opencopilot_db import create_database_schema

# from shared.models.opencopilot_db import create_database_schema
from shared.models.opencopilot_db.database_setup import create_database_schema
from utils.config import Config
from routes.chat.chat_dto import ChatInput
from werkzeug.exceptions import HTTPException

from flask_socketio import SocketIO
from utils.get_logger import CustomLogger
from utils.get_logger import SilentException
from routes.search.search_controller import search_workflow
from routes.transformers.zid import (
transformers_workflow,
) # todo move the trans. to it's own file

from flask_cors import CORS
from shared.models.opencopilot_db.database_setup import engine
from sqlalchemy.orm import sessionmaker
from utils.llm_consts import JWT_SECRET_KEY
import sentry_sdk

SessionLocal = sessionmaker(bind=engine)
sentry_sdk.init(traces_sample_rate=1.0, profiles_sample_rate=1.0)

logger = CustomLogger(__name__)
SessionLocal = sessionmaker(bind=engine)

load_dotenv()

create_database_schema()
app = Flask(__name__)

# @Todo only allow for cloud and porter [for later]
CORS(app)
app.config["JWT_SECRET_KEY"] = JWT_SECRET_KEY # Change this to a random secret key
JWTManager(app)
# CORS(app)
socketio = SocketIO(app, cors_allowed_origins="*")

app.after_request(log_api_call)

app.url_map.strict_slashes = False


app.register_blueprint(flow, url_prefix="/backend/flows")
app.register_blueprint(chat_workflow, url_prefix="/backend/chat")
app.register_blueprint(copilot, url_prefix="/backend/copilot")
app.register_blueprint(upload_controller, url_prefix="/backend/uploads")
app.register_blueprint(api_call_controller, url_prefix="/backend/api_calls")
app.register_blueprint(datasource_workflow, url_prefix="/backend/data_sources")

app.register_blueprint(action, url_prefix="/backend/actions")
app.register_blueprint(powerup, url_prefix="/backend/powerup")
app.register_blueprint(transformers_workflow, url_prefix="/backend/transformers")
app.register_blueprint(search_workflow, url_prefix="/backend/search")

app.config.from_object(Config)
socketio = SocketIO(app, cors_allowed_origins="*")


@app.errorhandler(HTTPException)
def handle_http_exception(error):
    """Convert any Werkzeug/Flask HTTP error into a JSON response.

    Reports the exception to the silent tracker, then returns a body of the
    form ``{"error": <name>, "message": <description>}`` with the error's
    own status code.
    """
    # Record the failure without interrupting the response cycle.
    SilentException.capture_exception(error)
    payload = {"error": error.name, "message": error.description}
    return jsonify(payload), error.code


@app.errorhandler(NotFound)
def handle_not_found(error):
    """Return a JSON 404 body instead of Flask's default HTML error page."""
    # Report the miss to the silent tracker before responding.
    SilentException.capture_exception(error)
    body = {
        "error": "Not Found",
        "message": "The requested URL was not found on the server.",
    }
    return jsonify(body), 404


@app.errorhandler(Exception)
def handle_exception(error):
# If the exception is an HTTPException (includes 4XX and 5XX errors)
if isinstance(error, HTTPException):
# Log the error or perform any other necessary actions
logger.error("HTTP Error", error=error)
return jsonify({"error": error.name, "message": error.description}), error.code

traceback.print_exc()
SilentException.capture_exception(error)
return (
jsonify(
{
Expand All @@ -82,13 +111,15 @@ def handle_exception(error):

@socketio.on("send_chat")
def handle_send_chat(json_data):
input_data = ChatInput(**json_data)
message = input_data.content
session_id = input_data.session_id
headers_from_json = input_data.headers
# headers = request.headers
bot_token = input_data.bot_token
# bot_token = headers.environ.get("HTTP_X_BOT_TOKEN")
user_message = ChatInput(**json_data)
message = user_message.content
session_id = user_message.session_id
headers_from_json = user_message.headers
bot_token = user_message.bot_token
extra_params = user_message.extra_params or {}
incoming_message_id = (
user_message.id or None
) # incoming message (assigned by the client)

json_data = {
"url": request.base_url,
Expand All @@ -98,17 +129,37 @@ def handle_send_chat(json_data):
"method": "wss",
}

# if not bot_token:
# socketio.emit(session_id, {"error": "Bot token is required"})
# return

asyncio.run(send_chat_stream(message, bot_token, session_id, headers_from_json))
asyncio.run(
send_chat_stream(
message,
bot_token,
session_id,
headers_from_json,
extra_params,
incoming_message_id,
)
)
log_opensource_telemetry_data(json_data)


@app.route("/backend/demo/guild_quality")
def home():
    """Serve the guild-quality demo page (renders ``index.html``)."""
    template_name = "index.html"
    return render_template(template_name)


@app.route("/backend/demo/zid")
def zid():
    """Serve the Zid demo page (renders ``zid.html``)."""
    template_name = "zid.html"
    return render_template(template_name)


@app.route("/backend/demo/justpaid")
def justpaid():
    """Serve the JustPaid demo page (renders ``justpaid.html``)."""
    template_name = "justpaid.html"
    return render_template(template_name)


init_qdrant_collections()

if __name__ == "__main__":
socketio.run(
app, host="0.0.0.0", port=8002, debug=True, use_reloader=True, log_output=False
app, host="0.0.0.0", port=8002, debug=True, use_reloader=True, log_output=True
)
15 changes: 11 additions & 4 deletions llm-server/celery_app.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,15 @@
import os
from dotenv import load_dotenv
import sentry_sdk
from celery import Celery
from shared.models.opencopilot_db import create_database_schema
from sentry_sdk.integrations.celery import CeleryIntegration

# Load environment variables from .env file
load_dotenv()

from celery import Celery
from shared.models.opencopilot_db import create_database_schema
sentry_sdk.init(
traces_sample_rate=1.0, profiles_sample_rate=1.0, integrations=[CeleryIntegration()]
)


create_database_schema()
Expand All @@ -14,5 +18,8 @@
broker=os.getenv("CELERY_BROKER", "redis://redis:6379/0"),
backend=os.getenv("CELERY_BACKEND", "redis://redis:6379/1"),
)

app.conf.imports = ("workers.tasks",)

app.conf.broker_connection_max_retries = 5
app.conf.broker_connection_retry = True
app.conf.broker_connection_retry_on_startup = True
1 change: 1 addition & 0 deletions llm-server/copilot_exceptions/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
from .api_call_failed_exception import APICallFailedException
4 changes: 2 additions & 2 deletions llm-server/custom_types/response_dict.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
from typing import TypedDict, Optional, Dict, List
from typing import TypedDict, Optional, Dict, List, Any
from dataclasses import dataclass, field


Expand All @@ -9,7 +9,7 @@ class ResponseDict(TypedDict):

@dataclass
class ApiRequestResult:
api_requests: Dict[str, str] = field(default_factory=dict)
api_requests: Dict[str, Any] = field(default_factory=dict)


@dataclass
Expand Down
13 changes: 13 additions & 0 deletions llm-server/dependencies/database.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
from sqlalchemy.orm import sessionmaker
from contextlib import contextmanager
from shared.models.opencopilot_db import engine

SessionLocal = sessionmaker(bind=engine)

@contextmanager
def get_db_session():
    """Yield a SQLAlchemy session bound to the shared engine.

    The session is always closed when the ``with`` block exits, whether the
    body completed normally or raised.
    """
    session = SessionLocal()
    try:
        yield session
    finally:
        # Guarantee the connection is returned to the pool.
        session.close()
1 change: 0 additions & 1 deletion llm-server/entities/action_entity.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@
from .utils import generate_operation_id_from_name
from dataclasses import dataclass


@dataclass
class ActionDTO(BaseModel):
id: Optional[str] = None
Expand Down
24 changes: 20 additions & 4 deletions llm-server/extractors/convert_json_to_text.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,12 +5,9 @@
from langchain.schema import HumanMessage, SystemMessage, BaseMessage

from utils.get_chat_model import get_chat_model
from utils.get_logger import CustomLogger
from flask_socketio import emit
from entities.action_entity import ActionDTO

openai_api_key = os.getenv("OPENAI_API_KEY")
logger = CustomLogger(module_name=__name__)


def convert_json_to_text(
Expand Down Expand Up @@ -93,14 +90,33 @@ def create_readable_error(
return cast(str, result)


def create_readable_fill_the_form_message(
    user_input: str
) -> str:
    # Builds the system + human messages for prompting the user to fill a form.
    # NOTE(review): declared as returning ``str`` but currently falls off the
    # end and returns ``None`` — the messages are never sent to the chat model.
    # The in-code TODO below confirms this is unfinished; presumably it should
    # invoke the LLM (cf. the sibling stream_messages helper) and return its
    # text — confirm with the author before relying on this function.
    system_message = SystemMessage(
        content="""As an AI chat assistant, your role involves requesting from the user to fill the form, you will be only called when the user request require some information to be filled.
prompt the user to fill the form so you can continue with their request
For example, you can say "Please fill the form so I can continue with your request XXX, make sure to fill all the required fields" but tylor the message to the user input and request
make sure it's less than 70 words
"""
    )

    # Single human turn carrying the raw user input verbatim.
    messages: List[HumanMessage] = []
    messages.append(HumanMessage(content=f"Here is the user input: \n\n{user_input}"))

    # todo @shanur please help me to return ok message




def stream_messages(
system_message: SystemMessage,
messages: List[HumanMessage],
is_streaming: bool,
session_id: str,
tag: str,
) -> str:
chat = get_chat_model()
chat = get_chat_model(tag)

all_messages: List[BaseMessage] = []
all_messages.append(system_message)
Expand Down
10 changes: 2 additions & 8 deletions llm-server/extractors/extract_body.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,9 +7,6 @@

from typing import Any, Optional
import importlib
from utils.get_logger import CustomLogger

logger = CustomLogger(module_name=__name__)

openai_api_key = os.getenv("OPENAI_API_KEY")
llm = get_llm()
Expand All @@ -22,7 +19,7 @@ async def gen_body_from_schema(
app: Optional[str],
current_state: Optional[str],
) -> Any:
chat = get_chat_model()
chat = get_chat_model("gen_body_from_schema")
api_generation_prompt = None
if app:
module_name = f"integrations.custom_prompts.{app}"
Expand Down Expand Up @@ -50,9 +47,6 @@ async def gen_body_from_schema(

result = chat(messages)

logger.info("LLM Body Response", content=result.content, text=text, app=app)

d: Any = extract_json_payload(result.content)
logger.info("Parsed the json payload", payload=d, app=app, api_generation_prompt=api_generation_prompt)


return d
Loading

0 comments on commit 51f89fe

Please sign in to comment.