Integrates LiteLLM for Unified Access to Multiple LLM Models #5925

Open · wants to merge 10 commits into base: preview
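
For reviewers unfamiliar with LiteLLM: it exposes an OpenAI-style completion call and routes to a provider based on the model string, which is what lets this PR drop the OpenAI SDK. The sketch below mirrors the litellm.completion call used in the diff; the helper name complete() and the mention of non-OpenAI models are illustrative assumptions only, and this PR currently restricts GPT_ENGINE to gpt-4o-mini and gpt-4o.

    import os

    import litellm


    def complete(model: str, prompt: str) -> str:
        # Same call shape regardless of backend; LiteLLM normalizes the
        # response to the OpenAI chat-completions structure.
        response = litellm.completion(
            model=model,
            messages=[{"role": "user", "content": prompt}],
            api_key=os.environ.get("OPENAI_API_KEY"),
        )
        return response.choices[0].message.content.strip()


    if __name__ == "__main__":
        # Switching providers would only change the model string and the
        # relevant provider API key; any non-OpenAI identifiers are
        # illustrative and not part of this PR.
        print(complete("gpt-4o-mini", "Write a one-line summary of LiteLLM."))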
137 changes: 73 additions & 64 deletions apiserver/plane/app/views/external/base.py
@@ -1,74 +1,97 @@
# Python imports
import requests
# Python import
import os

# Third party imports
from openai import OpenAI
from rest_framework.response import Response
# Third party import
import litellm
import requests

from rest_framework import status
from rest_framework.response import Response

# Django imports
# Module import
from plane.app.permissions import ROLE, allow_permission
from plane.app.serializers import (ProjectLiteSerializer,
                                    WorkspaceLiteSerializer)
from plane.db.models import Project, Workspace
from plane.license.utils.instance_value import get_configuration_value

# Module imports
from ..base import BaseAPIView
from plane.app.permissions import allow_permission, ROLE
from plane.db.models import Workspace, Project
from plane.app.serializers import ProjectLiteSerializer, WorkspaceLiteSerializer
from plane.license.utils.instance_value import get_configuration_value


def get_gpt_config():
"""Helper to get GPT configuration values"""
OPENAI_API_KEY, GPT_ENGINE = get_configuration_value([
{
"key": "OPENAI_API_KEY",
"default": os.environ.get("OPENAI_API_KEY", None),
},
{
"key": "GPT_ENGINE",
"default": os.environ.get("GPT_ENGINE", "gpt-4o-mini"),
        },
    ])

    if not OPENAI_API_KEY or not GPT_ENGINE:
        return None, None
    return OPENAI_API_KEY, GPT_ENGINE


def get_gpt_response(task, prompt, api_key, engine):
"""Helper to get GPT completion response"""
final_text = task + "\n" + prompt
try:
response = litellm.completion(
model=engine,
messages=[{"role": "user", "content": final_text}],
api_key=api_key,
)
text = response.choices[0].message.content.strip()
return text, None
except Exception as e:
return None, str(e)


class GPTIntegrationEndpoint(BaseAPIView):
    @allow_permission([ROLE.ADMIN, ROLE.MEMBER])
    def post(self, request, slug, project_id):
        OPENAI_API_KEY, GPT_ENGINE = get_configuration_value(
            [
                {
                    "key": "OPENAI_API_KEY",
                    "default": os.environ.get("OPENAI_API_KEY", None),
                },
                {
                    "key": "GPT_ENGINE",
                    "default": os.environ.get("GPT_ENGINE", "gpt-3.5-turbo"),
                },
            ]
        )
        OPENAI_API_KEY, GPT_ENGINE = get_gpt_config()

        supported_models = ["gpt-4o-mini", "gpt-4o"]
        if GPT_ENGINE not in supported_models:
            return Response(
                {"error": f"Unsupported model. Please use one of: {', '.join(supported_models)}"},
                status=status.HTTP_400_BAD_REQUEST,
            )

        # Get the configuration value
        # Check the keys
        if not OPENAI_API_KEY or not GPT_ENGINE:
            return Response(
                {"error": "OpenAI API key and engine is required"},
                status=status.HTTP_400_BAD_REQUEST,
            )

        prompt = request.data.get("prompt", False)
        task = request.data.get("task", False)

        if not task:
            return Response(
                {"error": "Task is required"}, status=status.HTTP_400_BAD_REQUEST
            )

        final_text = task + "\n" + prompt

        client = OpenAI(api_key=OPENAI_API_KEY)

        response = client.chat.completions.create(
            model=GPT_ENGINE, messages=[{"role": "user", "content": final_text}]
        )
        text, error = get_gpt_response(task, request.data.get("prompt", False), OPENAI_API_KEY, GPT_ENGINE)
        if not text and error:
            return Response(
                {"error": "An internal error has occurred."},
                status=status.HTTP_500_INTERNAL_SERVER_ERROR,
            )

        workspace = Workspace.objects.get(slug=slug)
        project = Project.objects.get(pk=project_id)

        text = response.choices[0].message.content.strip()
        text_html = text.replace("\n", "<br/>")
        return Response(
            {
                "response": text,
                "response_html": text_html,
                "response_html": text.replace("\n", "<br/>"),
                "project_detail": ProjectLiteSerializer(project).data,
                "workspace_detail": WorkspaceLiteSerializer(workspace).data,
            },

Code scanning / CodeQL warning (Medium): Information exposure through an exception. Stack trace information flows to this location and may be exposed to an external user.
            status=status.HTTP_200_OK,
        )

@@ -76,47 +99,33 @@
class WorkspaceGPTIntegrationEndpoint(BaseAPIView):
    @allow_permission(allowed_roles=[ROLE.ADMIN, ROLE.MEMBER], level="WORKSPACE")
    def post(self, request, slug):
        OPENAI_API_KEY, GPT_ENGINE = get_configuration_value(
            [
                {
                    "key": "OPENAI_API_KEY",
                    "default": os.environ.get("OPENAI_API_KEY", None),
                },
                {
                    "key": "GPT_ENGINE",
                    "default": os.environ.get("GPT_ENGINE", "gpt-3.5-turbo"),
                },
            ]
        )

        # Get the configuration value
        # Check the keys
        OPENAI_API_KEY, GPT_ENGINE = get_gpt_config()

        if not OPENAI_API_KEY or not GPT_ENGINE:
            return Response(
                {"error": "OpenAI API key and engine is required"},
                status=status.HTTP_400_BAD_REQUEST,
            )

        prompt = request.data.get("prompt", False)
        task = request.data.get("task", False)

        if not task:
            return Response(
                {"error": "Task is required"}, status=status.HTTP_400_BAD_REQUEST
            )

        final_text = task + "\n" + prompt

        client = OpenAI(api_key=OPENAI_API_KEY)

        response = client.chat.completions.create(
            model=GPT_ENGINE, messages=[{"role": "user", "content": final_text}]
        )
        text, error = get_gpt_response(task, request.data.get("prompt", False), OPENAI_API_KEY, GPT_ENGINE)
        if not text and error:
            return Response(
                {"error": "An internal error has occurred."},
                status=status.HTTP_500_INTERNAL_SERVER_ERROR,
            )

        text = response.choices[0].message.content.strip()
        text_html = text.replace("\n", "<br/>")
        return Response(
            {"response": text, "response_html": text_html}, status=status.HTTP_200_OK
            {
                "response": text,
                "response_html": text.replace("\n", "<br/>"),
            },
            status=status.HTTP_200_OK,
        )
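
A quick way to exercise the new helper without hitting a real provider is to stub out litellm.completion. The tests below are a suggestion, not part of this PR; they assume a pytest-style test module with Django settings configured, and import the helper from plane.app.views.external.base as laid out in the diff.

    from unittest import mock

    from plane.app.views.external.base import get_gpt_response


    def test_get_gpt_response_returns_stripped_text_on_success():
        fake_choice = mock.Mock()
        fake_choice.message.content = "  generated text  "
        fake_response = mock.Mock(choices=[fake_choice])
        with mock.patch(
            "plane.app.views.external.base.litellm.completion",
            return_value=fake_response,
        ):
            text, error = get_gpt_response("Summarize", "Some prompt", "sk-test", "gpt-4o-mini")
        assert text == "generated text"
        assert error is None


    def test_get_gpt_response_returns_error_string_on_failure():
        # The views turn this error into a generic message for the client,
        # which is the concern raised by the CodeQL note above; the raw
        # provider error is not sent back to the caller.
        with mock.patch(
            "plane.app.views.external.base.litellm.completion",
            side_effect=RuntimeError("boom"),
        ):
            text, error = get_gpt_response("Summarize", "Some prompt", "sk-test", "gpt-4o-mini")
        assert text is None
        assert "boom" in error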


2 changes: 1 addition & 1 deletion apiserver/requirements/base.txt
@@ -37,7 +37,7 @@ uvicorn==0.29.0
# sockets
channels==4.1.0
# ai
openai==1.25.0
litellm==1.51.0
# slack
slack-sdk==3.27.1
# apm
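
With openai swapped for the pinned litellm release, a small smoke check such as the one below (illustrative only; it assumes OPENAI_API_KEY, and optionally GPT_ENGINE, are set in the environment, matching what the views read) confirms the new dependency installs and can reach the configured model.

    import os
    from importlib.metadata import version

    import litellm

    # Confirm the pinned package resolved.
    print("litellm version:", version("litellm"))

    # One round trip through the same call pattern the views use.
    response = litellm.completion(
        model=os.environ.get("GPT_ENGINE", "gpt-4o-mini"),
        messages=[{"role": "user", "content": "ping"}],
        api_key=os.environ["OPENAI_API_KEY"],
    )
    print(response.choices[0].message.content)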