Skip to content

Commit 6ec3d3d

Browse files
author
starpig1129
committed
update gpt response
1 parent 6970c74 commit 6ec3d3d

File tree

5 files changed

+124
-135
lines changed

5 files changed

+124
-135
lines changed

choseAct_system_prompt.txt

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,9 @@
11
Here is a list of tools that you have available to you:
2-
2+
```python
33
def internet_search(query: str, search_type: str):
44
"""
55
Performs a web search based on the given query and search type
6+
If related to a restaurant use eat
67
If the conversation contains a URL, select url
78
Args:
89
query (str): Query to search the web with
@@ -78,7 +79,7 @@ def manage_user_data(user_id: str, user_data: str = None, action: str = 'read'):
7879
action (str): The action to perform. Can be 'read' or 'save'.
7980
"""
8081
pass
81-
82+
```
8283
You are a multi-functional Discord bot assistant. Your role is to analyze user requests, choose the most appropriate tool(s) from the list above, and provide helpful responses. When using the gen_img tool, provide English prompts and add relevant tips.
8384

8485
To use a tool, write 'Action:' followed by a list of actions in JSON format, e.g.:
@@ -89,4 +90,5 @@ Action:
8990
"tool_name": "tool name (one of [manage_user_data,vqa_answer,internet_search, directly_answer,calculate,gen_img,query_schedule,send_reminder])",
9091
"parameters": "the input to the tool"
9192
}
92-
]
93+
]
94+
```

cogs/eat/providers/googlemap_crawler.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ def __init__(self, driver:str ='./chromedriverlinux64/chromedriver'):
2323

2424
def search(self, keyword):
2525
# 使用webdriver打開特定的Google地圖搜索URL
26-
self.webdriver.get(f"https://www.google.com/maps/search/{keyword}餐廳/@25.1760362,121.4491291,17z")
26+
self.webdriver.get(f"https://www.google.com/maps/search/{keyword}餐廳")
2727
html = self.webdriver.page_source
2828
soup = BeautifulSoup(html, "html.parser")
2929
try:
@@ -37,7 +37,7 @@ def search(self, keyword):
3737
selected = BeautifulSoup(html, "lxml")
3838
except:
3939
print('只有一個結果')
40-
url=f"https://www.google.com/maps/search/{keyword}餐廳/@25.1760362,121.4491291,17z"
40+
url=f"https://www.google.com/maps/search/{keyword}餐廳"
4141
selected = BeautifulSoup(html, "lxml")
4242
# 提取餐廳的標題、評分、類別和地址
4343
title = selected.find('h1', class_='DUwDvf lfPIob').text.strip()

gpt/claude_response.py

Lines changed: 81 additions & 83 deletions
Original file line numberDiff line numberDiff line change
@@ -1,65 +1,68 @@
1-
# MIT License
2-
3-
# Copyright (c) 2024 starpig1129
4-
5-
# Permission is hereby granted, free of charge, to any person obtaining a copy
6-
# of this software and associated documentation files (the "Software"), to deal
7-
# in the Software without restriction, including without limitation the rights
8-
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9-
# copies of the Software, and to permit persons to whom the Software is
10-
# furnished to do so, subject to the following conditions:
11-
12-
# The above copyright notice and this permission notice shall be included in all
13-
# copies or substantial portions of the Software.
14-
15-
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16-
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17-
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18-
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19-
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20-
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21-
# SOFTWARE.
22-
from anthropic import Anthropic, AsyncAnthropic, HUMAN_PROMPT, AI_PROMPT
23-
import requests
24-
import logging
251
import os
2+
from anthropic import AsyncAnthropic, HUMAN_PROMPT, AI_PROMPT
3+
from dotenv import load_dotenv
4+
import asyncio
5+
from threading import Thread
6+
from queue import Queue
7+
8+
# 加載 .env 文件中的環境變量
9+
load_dotenv()
10+
2611
# 初始化 Anthropic 客戶端
27-
anthropic = Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY"))
2812
async_anthropic = AsyncAnthropic(api_key=os.getenv("ANTHROPIC_API_KEY"))
29-
# 檢查 Claude API 的額度
30-
def check_claude_api_quota():
31-
ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY")
32-
if not ANTHROPIC_API_KEY:
33-
logging.warning("找不到 Anthropic API 金鑰")
34-
return None
35-
36-
try:
37-
# 發送請求到 Anthropic 的額度檢查端點
38-
response = requests.get(
39-
"https://api.anthropic.com/v1/quota",
40-
headers={"Authorization": f"Bearer {ANTHROPIC_API_KEY}"}
41-
)
42-
response.raise_for_status()
43-
quota_info = response.json()
44-
45-
# 返回剩餘額度信息
46-
return quota_info.get("remaining_quota")
47-
except Exception as e:
48-
logging.error(f"檢查 Claude API 額度時發生錯誤: {e}")
49-
return None
50-
# 使用 Claude API 生成流式回應
51-
async def generate_claude_stream_response(system_prompt,prompt, history, message_to_edit, channel):
52-
try:
53-
# 準備對話歷史
54-
messages = []
55-
for msg in history:
13+
14+
class FakeThread:
15+
def __init__(self, target, *args, **kwargs):
16+
self._target = target
17+
self._args = args
18+
self._kwargs = kwargs
19+
self.thread = Thread(target=self._run)
20+
self.is_finished = False
21+
22+
def _run(self):
23+
loop = asyncio.new_event_loop()
24+
asyncio.set_event_loop(loop)
25+
loop.run_until_complete(self._target(*self._args, **self._kwargs))
26+
self.is_finished = True
27+
28+
def start(self):
29+
self.thread.start()
30+
31+
def join(self):
32+
self.thread.join()
33+
34+
class Streamer:
35+
def __init__(self):
36+
self.queue = Queue()
37+
self.is_finished = False
38+
39+
def write(self, content):
40+
self.queue.put(content)
41+
42+
def finish(self):
43+
self.is_finished = True
44+
self.queue.put(None) # 结束标记
45+
46+
def __iter__(self):
47+
return self
48+
49+
def __next__(self):
50+
item = self.queue.get()
51+
if item is None:
52+
raise StopIteration
53+
return item
54+
55+
async def generate_claude_response_with_fake_thread(inst, system_prompt, streamer, dialogue_history=None):
56+
messages = []
57+
if dialogue_history:
58+
for msg in dialogue_history:
5659
role = HUMAN_PROMPT if msg["role"] == "user" else AI_PROMPT
5760
messages.append(f"{role} {msg['content']}")
58-
59-
messages.append(f"{HUMAN_PROMPT}{system_prompt}{prompt}")
60-
full_prompt = "\n\n".join(messages)
61+
62+
messages.append(f"{HUMAN_PROMPT}{system_prompt}{inst}")
63+
full_prompt = "\n\n".join(messages)
6164

62-
# 使用 Claude API 生成流式回應
65+
try:
6366
async with async_anthropic as client:
6467
response_stream = await client.completions.create(
6568
model="claude-3-5-sonnet-20240620",
@@ -68,35 +71,30 @@ async def generate_claude_stream_response(system_prompt,prompt, history, message
6871
stream=True
6972
)
7073

71-
full_response = ""
72-
current_message = message_to_edit
73-
buffer = ""
74-
message_result = ""
75-
buffer_size = 40 # 設置緩衝區大小
76-
7774
async for completion in response_stream:
7875
if completion.stop_reason:
7976
break
8077
chunk = completion.completion
81-
full_response += chunk
82-
buffer += chunk
83-
message_result += chunk
84-
if len(buffer) >= buffer_size:
85-
# 檢查是否超過 1900 字符
86-
if len(full_response+buffer)> 1900:
87-
# 創建新消息
88-
current_message = await channel.send("繼續輸出中...")
89-
full_response = ""
90-
await current_message.edit(content=full_response + buffer)
91-
buffer = "" # 清空緩衝區
92-
93-
# 處理剩餘的文本
94-
if buffer:
95-
if len(full_response+buffer)> 1900:
96-
current_message = await channel.send(buffer)
97-
else:
98-
await current_message.edit(content=full_response + buffer)
99-
return message_result
78+
streamer.write(chunk)
10079
except Exception as e:
101-
logging.error(f"使用 Claude API 生成回應時發生錯誤: {e}")
102-
raise e
80+
streamer.write(f"\nAn error occurred: {str(e)}")
81+
finally:
82+
streamer.finish()
83+
84+
async def generate_claude_response(inst, system_prompt, dialogue_history=None):
85+
streamer = Streamer()
86+
fake_thread = FakeThread(generate_claude_response_with_fake_thread, inst, system_prompt, streamer, dialogue_history)
87+
fake_thread.start()
88+
return fake_thread, streamer
89+
90+
# 使用示例
91+
def main():
92+
thread, streamer = asyncio.run(generate_claude_response("Hello, how are you?", "You are a helpful assistant."))
93+
94+
for content in streamer:
95+
print(content, end='', flush=True)
96+
97+
thread.join()
98+
99+
if __name__ == "__main__":
100+
main()

gpt/openai_response.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -34,9 +34,9 @@
3434
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
3535

3636
# 初始化 tokenizer
37-
tokenizer = tiktoken.encoding_for_model("gpt-4o")
37+
tokenizer = tiktoken.encoding_for_model("gpt-4o-mini")
3838

39-
def num_tokens_from_messages(messages, model="gpt-4o"):
39+
def num_tokens_from_messages(messages, model="gpt-4o-mini"):
4040
"""計算消息列表的 token 數量"""
4141
num_tokens = 0
4242
for message in messages:
@@ -102,7 +102,7 @@ async def generate_response_with_fake_thread(inst, system_prompt, streamer, dial
102102

103103
try:
104104
stream = client.chat.completions.create(
105-
model="gpt-4o",
105+
model="gpt-4o-mini",
106106
messages=messages,
107107
max_tokens=4096,
108108
temperature=0.5,

gpt/sendmessage.py

Lines changed: 33 additions & 44 deletions
Original file line numberDiff line numberDiff line change
@@ -23,9 +23,7 @@
2323
import json
2424
import faiss
2525
import logging
26-
import discord
2726
from gpt.gpt_response_gen import generate_response
28-
from gpt.claude_response import generate_claude_stream_response
2927
from langchain_community.vectorstores import FAISS
3028
from langchain_huggingface import HuggingFaceEmbeddings
3129
from langchain_community.docstore.in_memory import InMemoryDocstore
@@ -70,7 +68,7 @@ def save_vector_store(stores, path):
7068
try:
7169
for channel_id, store in stores.items():
7270
channel_path = f"{path}_{channel_id}"
73-
faiss.write_index(store.index, channel_path)
71+
#faiss.write_index(store.index, channel_path)
7472
logging.info(f"FAISS 索引已保存到 {path}")
7573
except Exception as e:
7674
logging.error(f"保存 FAISS 索引時發生錯誤: {e}")
@@ -93,9 +91,9 @@ def search_vector_database(query, channel_id):
9391
try:
9492
if channel_id not in vector_stores:
9593
return ''
96-
results = vector_stores[channel_id].similarity_search(query, k=5)
94+
results = vector_stores[channel_id].similarity_search(query, k=20)
9795
related_data = [result.metadata['text'] for result in results]
98-
96+
related_data = set(related_data)
9997
# 格式化相關資訊
10098
formatted_data = "Database:\n"
10199
for i, data in enumerate(related_data, 1):
@@ -131,47 +129,38 @@ async def gpt_message(message_to_edit, message, prompt):
131129
combined_prompt = f"information:<<{related_data}>>user: {prompt}"
132130

133131
try:
134-
# 嘗試使用 Claude API
135-
try:
136-
claude_response = await generate_claude_stream_response(system_prompt,combined_prompt, history_dict, message_to_edit, channel)
137-
return claude_response
138-
except Exception as e:
139-
logging.error(f"Claude API 錯誤: {e}")
140-
logging.warning("無法使用 Claude API,切換到原有的回應邏輯")
141-
142-
# 如果 Claude API 失敗,使用原有的回應邏輯
143-
responses = ""
144-
responsesall = ""
145-
message_result = ""
146-
thread, streamer = await generate_response(combined_prompt, system_prompt, history_dict)
147-
buffer_size = 40 # 設置緩衝區大小
148-
current_message = message_to_edit
149-
150-
for response in streamer:
151-
print(response, end="", flush=True)
152-
responses += response
153-
message_result += response
154-
if len(responses) >= buffer_size:
155-
# 檢查是否超過 2000 字符
156-
if len(responsesall+responses) > 1900:
157-
# 創建新消息
158-
current_message = await channel.send("繼續輸出中...")
159-
responsesall = ""
160-
responsesall += responses
161-
responsesall = responsesall.replace('<|eot_id|>', "")
162-
await current_message.edit(content=responsesall)
163-
responses = "" # 清空 responses 變數
164-
165-
# 處理剩餘的文本
166-
responsesall = responsesall.replace('<|eot_id|>', "")
167-
if len(responsesall+responses) > 1900:
168-
current_message = await channel.send(responses)
169-
else:
170-
responsesall+=responses
132+
responses = ""
133+
responsesall = ""
134+
message_result = ""
135+
thread, streamer = await generate_response(combined_prompt, system_prompt, history_dict)
136+
buffer_size = 40 # 設置緩衝區大小
137+
current_message = message_to_edit
138+
139+
for response in streamer:
140+
print(response, end="", flush=True)
141+
responses += response
142+
message_result += response
143+
if len(responses) >= buffer_size:
144+
# 檢查是否超過 2000 字符
145+
if len(responsesall+responses) > 1900:
146+
# 創建新消息
147+
current_message = await channel.send("繼續輸出中...")
148+
responsesall = ""
149+
responsesall += responses
171150
responsesall = responsesall.replace('<|eot_id|>', "")
172151
await current_message.edit(content=responsesall)
173-
thread.join()
174-
return message_result
152+
responses = "" # 清空 responses 變數
153+
154+
# 處理剩餘的文本
155+
responsesall = responsesall.replace('<|eot_id|>', "")
156+
if len(responsesall+responses) > 1900:
157+
current_message = await channel.send(responses)
158+
else:
159+
responsesall+=responses
160+
responsesall = responsesall.replace('<|eot_id|>', "")
161+
await current_message.edit(content=responsesall)
162+
thread.join()
163+
return message_result
175164
except Exception as e:
176165
logging.error(f"生成回應時發生錯誤: {e}")
177166
await message_to_edit.edit(content="抱歉,我不會講話了。")

0 commit comments

Comments
 (0)