
Commit

fixed bug: endpoint error
yym68686 committed Nov 26, 2023
1 parent b133bde commit 2663bf0
Showing 3 changed files with 29 additions and 30 deletions.
25 changes: 5 additions & 20 deletions chatgpt2api/chatgpt2api.py
@@ -55,21 +55,6 @@ def get_filtered_keys_from_object(obj: object, *keys: str) -> Set[str]:
     "claude-2",
 ]
 
-class openaiAPI:
-    def __init__(
-        self,
-        api_url: str = (os.environ.get("API_URL") or "https://api.openai.com/v1/chat/completions"),
-    ):
-        from urllib.parse import urlparse, urlunparse
-        self.source_api_url: str = api_url
-        parsed_url = urlparse(self.source_api_url)
-        self.base_url: str = urlunparse(parsed_url[:2] + ("",) * 4)
-        self.v1_url: str = urlunparse(parsed_url[:2] + ("/v1",) + ("",) * 3)
-        self.chat_url: str = urlunparse(parsed_url[:2] + ("/v1/chat/completions",) + ("",) * 3)
-        self.image_url: str = urlunparse(parsed_url[:2] + ("/v1/images/generations",) + ("",) * 3)
-
-bot_api_url = openaiAPI()
-
 class Imagebot:
     def __init__(
         self,
@@ -87,7 +72,7 @@ def dall_e_3(
         model: str = None,
         **kwargs,
     ):
-        url = bot_api_url.image_url
+        url = config.bot_api_url.image_url
         headers = {"Authorization": f"Bearer {kwargs.get('api_key', self.api_key)}"}
 
         json_post = {
@@ -292,7 +277,7 @@ def ask_stream(
         self.__truncate_conversation(convo_id=convo_id)
         # print(self.conversation[convo_id])
         # Get response
-        url = bot_api_url.chat_url
+        url = config.bot_api_url.chat_url
         headers = {"Authorization": f"Bearer {kwargs.get('api_key', self.api_key)}"}
 
         if self.engine == "gpt-4-1106-preview":
@@ -413,7 +398,7 @@ async def ask_stream_async(
         # Get response
         async with self.aclient.stream(
             "post",
-            bot_api_url.chat_url,
+            config.bot_api_url.chat_url,
             headers={"Authorization": f"Bearer {kwargs.get('api_key', self.api_key)}"},
             json={
                 "model": model or self.engine,
@@ -546,8 +531,8 @@ def search_summary(
             chatllm = EducationalLLM(callback_manager=CallbackManager([chainStreamHandler]))
             chainllm = EducationalLLM()
         else:
-            chatllm = ChatOpenAI(streaming=True, callback_manager=CallbackManager([chainStreamHandler]), temperature=config.temperature, openai_api_base=bot_api_url.v1_url, model_name=self.engine, openai_api_key=config.API)
-            chainllm = ChatOpenAI(temperature=config.temperature, openai_api_base=bot_api_url.v1_url, model_name=config.GPT_ENGINE, openai_api_key=config.API)
+            chatllm = ChatOpenAI(streaming=True, callback_manager=CallbackManager([chainStreamHandler]), temperature=config.temperature, openai_api_base=config.bot_api_url.v1_url, model_name=self.engine, openai_api_key=config.API)
+            chainllm = ChatOpenAI(temperature=config.temperature, openai_api_base=config.bot_api_url.v1_url, model_name=config.GPT_ENGINE, openai_api_key=config.API)
 
         if config.SEARCH_USE_GPT:
             gpt_search_thread = ThreadWithReturnValue(target=gptsearch, args=(prompt, chainllm,))
17 changes: 16 additions & 1 deletion config.py
@@ -32,4 +32,19 @@
 if whitelist:
     whitelist = [int(id) for id in whitelist.split(",")]
 
-USE_G4F = False
+USE_G4F = False
+
+class openaiAPI:
+    def __init__(
+        self,
+        api_url: str = (os.environ.get("API_URL") or "https://api.openai.com/v1/chat/completions"),
+    ):
+        from urllib.parse import urlparse, urlunparse
+        self.source_api_url: str = api_url
+        parsed_url = urlparse(self.source_api_url)
+        self.base_url: str = urlunparse(parsed_url[:2] + ("",) * 4)
+        self.v1_url: str = urlunparse(parsed_url[:2] + ("/v1",) + ("",) * 3)
+        self.chat_url: str = urlunparse(parsed_url[:2] + ("/v1/chat/completions",) + ("",) * 3)
+        self.image_url: str = urlunparse(parsed_url[:2] + ("/v1/images/generations",) + ("",) * 3)
+
+bot_api_url = openaiAPI()
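
For reference, a minimal usage sketch of the relocated endpoint helper. It only exercises the standard-library urlparse/urlunparse logic shown above; the gateway URL is hypothetical, and it assumes config.py imports cleanly in your environment:

from config import openaiAPI

# Hypothetical self-hosted, OpenAI-compatible gateway (not part of this commit).
api = openaiAPI("https://my-gateway.example.com/v1/chat/completions")
print(api.base_url)   # https://my-gateway.example.com
print(api.v1_url)     # https://my-gateway.example.com/v1
print(api.chat_url)   # https://my-gateway.example.com/v1/chat/completions
print(api.image_url)  # https://my-gateway.example.com/v1/images/generations
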
17 changes: 8 additions & 9 deletions utils/agent.py
@@ -36,7 +36,6 @@
 from langchain.utilities import WikipediaAPIWrapper
 from utils.googlesearch import GoogleSearchAPIWrapper
 from langchain.document_loaders import UnstructuredPDFLoader
-from chatgpt2api.chatgpt2api import bot_api_url
 
 def getmd5(string):
     import hashlib
@@ -96,8 +95,8 @@ def get_chain(store, llm):
     return chain
 
 async def docQA(docpath, query_message, persist_db_path="db", model = "gpt-3.5-turbo"):
-    chatllm = ChatOpenAI(temperature=0.5, openai_api_base=bot_api_url.v1_url, model_name=model, openai_api_key=config.API)
-    embeddings = OpenAIEmbeddings(openai_api_base=bot_api_url.v1_url, openai_api_key=config.API)
+    chatllm = ChatOpenAI(temperature=0.5, openai_api_base=config.bot_api_url.v1_url, model_name=model, openai_api_key=config.API)
+    embeddings = OpenAIEmbeddings(openai_api_base=config.bot_api_url.v1_url, openai_api_key=config.API)
 
     sitemap = "sitemap.xml"
     match = re.match(r'^(https?|ftp)://[^\s/$.?#].[^\s]*$', docpath)
@@ -136,7 +135,7 @@ def get_doc_from_url(url):
     return filename
 
 def persist_emdedding_pdf(docurl, persist_db_path):
-    embeddings = OpenAIEmbeddings(openai_api_base=bot_api_url.v1_url, openai_api_key=os.environ.get('API', None))
+    embeddings = OpenAIEmbeddings(openai_api_base=config.bot_api_url.v1_url, openai_api_key=os.environ.get('API', None))
     filename = get_doc_from_url(docurl)
     docpath = os.getcwd() + "/" + filename
     loader = UnstructuredPDFLoader(docpath)
@@ -151,8 +150,8 @@ def persist_emdedding_pdf(docurl, persist_db_path):
     return vector_store
 
 async def pdfQA(docurl, docpath, query_message, model="gpt-3.5-turbo"):
-    chatllm = ChatOpenAI(temperature=0.5, openai_api_base=bot_api_url.v1_url, model_name=model, openai_api_key=os.environ.get('API', None))
-    embeddings = OpenAIEmbeddings(openai_api_base=bot_api_url.v1_url, openai_api_key=os.environ.get('API', None))
+    chatllm = ChatOpenAI(temperature=0.5, openai_api_base=config.bot_api_url.v1_url, model_name=model, openai_api_key=os.environ.get('API', None))
+    embeddings = OpenAIEmbeddings(openai_api_base=config.bot_api_url.v1_url, openai_api_key=os.environ.get('API', None))
     persist_db_path = getmd5(docpath)
     if not os.path.exists(persist_db_path):
         vector_store = persist_emdedding_pdf(docurl, persist_db_path)
@@ -164,8 +163,8 @@ async def pdfQA(docurl, docpath, query_message, model="gpt-3.5-turbo"):
     return result['result']
 
 def pdf_search(docurl, query_message, model="gpt-3.5-turbo"):
-    chatllm = ChatOpenAI(temperature=0.5, openai_api_base=bot_api_url.v1_url, model_name=model, openai_api_key=os.environ.get('API', None))
-    embeddings = OpenAIEmbeddings(openai_api_base=bot_api_url.v1_url, openai_api_key=os.environ.get('API', None))
+    chatllm = ChatOpenAI(temperature=0.5, openai_api_base=config.bot_api_url.v1_url, model_name=model, openai_api_key=os.environ.get('API', None))
+    embeddings = OpenAIEmbeddings(openai_api_base=config.bot_api_url.v1_url, openai_api_key=os.environ.get('API', None))
     filename = get_doc_from_url(docurl)
     docpath = os.getcwd() + "/" + filename
     loader = UnstructuredPDFLoader(docpath)
@@ -320,7 +319,7 @@ def get_google_search_results(prompt: str, context_max_tokens: int):
     if config.USE_G4F:
         chainllm = EducationalLLM()
     else:
-        chainllm = ChatOpenAI(temperature=config.temperature, openai_api_base=bot_api_url.v1_url, model_name=config.GPT_ENGINE, openai_api_key=config.API)
+        chainllm = ChatOpenAI(temperature=config.temperature, openai_api_base=config.bot_api_url.v1_url, model_name=config.GPT_ENGINE, openai_api_key=config.API)
 
     if config.SEARCH_USE_GPT:
         gpt_search_thread = ThreadWithReturnValue(target=gptsearch, args=(prompt, chainllm,))
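
With the helper now living in config.py, the endpoint has a single source of truth: the API_URL environment variable read at import time. A rough sketch of that flow, using a hypothetical gateway address and the names introduced in this commit:

import os
os.environ["API_URL"] = "https://my-gateway.example.com/v1/chat/completions"  # set before config is imported

import config  # config.bot_api_url is built once from API_URL

# chatgpt2api/chatgpt2api.py posts chat and image requests to:
chat_endpoint = config.bot_api_url.chat_url
image_endpoint = config.bot_api_url.image_url

# utils/agent.py hands the /v1 base to ChatOpenAI and OpenAIEmbeddings:
openai_api_base = config.bot_api_url.v1_url
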
