diff --git a/README.md b/README.md
index c6b4338c..2a809db4 100644
--- a/README.md
+++ b/README.md
@@ -32,7 +32,7 @@ Join the [Telegram Group](https://t.me/+_01cz9tAkUc1YzZl) chat to share your use
 | ---------------------- | ------------------------------------------------------------ |
 | **BOT_TOKEN (required)** | Telegram bot token. Create a bot on [BotFather](https://t.me/BotFather) to get the BOT_TOKEN. |
 | **API (required)** | OpenAI or third-party API key. |
-| WEB_HOOK | Whenever the telegram bot receives a user message, the message will be passed to WEB_HOOK, where the bot will listen to it and process the received messages in a timely manner. |
+| WEB_HOOK (optional) | Whenever the telegram bot receives a user message, the message will be passed to WEB_HOOK, where the bot will listen to it and process the received messages in a timely manner. |
 | API_URL (optional) | If you are using the OpenAI official API, you don't need to set this. If you are using a third-party API, you need to fill in the third-party proxy website. The default is: https://api.openai.com/v1/chat/completions |
 | GPT_ENGINE (optional) | Set the default QA model; the default is: `gpt-3.5-turbo`. This item can be freely switched using the bot's "info" command, and it doesn't need to be set in principle. |
 | NICK (optional) | The default is empty, and NICK is the name of the bot. The bot will only respond when the message starts with NICK that the user inputs, otherwise the bot will respond to any message. Especially in group chats, if there is no NICK, the bot will reply to all messages. |
@@ -85,10 +85,10 @@ Follow the prompts to deploy. A secondary domain name will be provided in the of
 Set environment variables
 
 ```bash
-flyctl secrets set WEB_HOOK=https://flyio-app-name.fly.dev/
 flyctl secrets set BOT_TOKEN=bottoken
 flyctl secrets set API=
 # optional
+flyctl secrets set WEB_HOOK=https://flyio-app-name.fly.dev/
 flyctl secrets set NICK=javis
 ```
 
@@ -125,7 +125,6 @@ Start the container
 ```bash
 docker run -p 80:8080 --name chatbot -dit \
 -e BOT_TOKEN="telegram bot token" \
- -e WEB_HOOK="https://your_host.com/" \
 -e API="" \
 -e API_URL= \
 yym68686/chatgpt:1.0
@@ -141,7 +140,6 @@ services:
     image: yym68686/chatgpt:1.0
     environment:
      - BOT_TOKEN=
-     - WEB_HOOK=
      - API=
      - API_URL=
     ports:
diff --git a/bot.py b/bot.py
index 2e93abbc..539efe76 100644
--- a/bot.py
+++ b/bot.py
@@ -5,7 +5,8 @@
 import traceback
 import utils.decorators as decorators
 from utils.md2tgmd import escape
-from chatgpt2api.chatgpt2api import Chatbot as GPT
+from utils.chatgpt2api import Chatbot as GPT
+from utils.chatgpt2api import claudebot
 from telegram.constants import ChatAction
 from utils.agent import docQA, get_doc_from_local
 from telegram import BotCommand, InlineKeyboardButton, InlineKeyboardMarkup
@@ -285,7 +286,7 @@ async def delete_message(update, context, messageid, delay=10):
             # InlineKeyboardButton("gpt-4-32k-0613", callback_data="gpt-4-32k-0613"),
         # ],
         [
-            InlineKeyboardButton("claude-2", callback_data="claude-2"),
+            InlineKeyboardButton("claude-2.1", callback_data="claude-2.1"),
             InlineKeyboardButton("claude-2-web", callback_data="claude-2-web"),
         ],
         [
@@ -329,11 +330,15 @@ async def button_press(update, context):
     callback_query = update.callback_query
     await callback_query.answer()
     data = callback_query.data
-    if ("gpt-" or "cluade") in data:
+    print(data)
+    if "gpt-" in data or "claude" in data:
         config.GPT_ENGINE = data
-        if config.API:
+        if config.API and "gpt-" in data:
             config.ChatGPTbot = GPT(api_key=f"{config.API}", engine=config.GPT_ENGINE, system_prompt=config.systemprompt, temperature=config.temperature)
             config.ChatGPTbot.reset(convo_id=str(update.effective_chat.id), system_prompt=config.systemprompt)
+        if config.ClaudeAPI and "claude" in data:
+            config.ChatGPTbot = claudebot(api_key=f"{config.ClaudeAPI}", engine=config.GPT_ENGINE, system_prompt=config.systemprompt, temperature=config.temperature)
+        print(config.GPT_ENGINE)
     try:
         info_message = (
             f"`Hi, {update.effective_user.username}!`\n\n"
diff --git a/config.py b/config.py
index 2abb5897..c12733df 100644
--- a/config.py
+++ b/config.py
@@ -20,14 +20,18 @@
 Current_Date = current_date.strftime("%Y-%m-%d")
 systemprompt = f"You are ChatGPT, a large language model trained by OpenAI. Knowledge cutoff: 2021-09. Current date: [ {Current_Date} ]"
 
-from chatgpt2api.chatgpt2api import Chatbot as GPT
-from chatgpt2api.chatgpt2api import Imagebot
+from utils.chatgpt2api import Chatbot as GPT
+from utils.chatgpt2api import Imagebot, claudebot
 if API:
     ChatGPTbot = GPT(api_key=f"{API}", engine=GPT_ENGINE, system_prompt=systemprompt, temperature=temperature)
     dallbot = Imagebot(api_key=f"{API}")
 else:
     ChatGPTbot = None
 
+ClaudeAPI = os.environ.get('claude_api_key', None)
+if ClaudeAPI:
+    claudeBot = claudebot(api_key=f"{ClaudeAPI}")
+
 whitelist = os.environ.get('whitelist', None)
 if whitelist:
     whitelist = [int(id) for id in whitelist.split(",")]
diff --git a/test/test_claude.py b/test/test_claude.py
new file mode 100644
index 00000000..5a16fcc8
--- /dev/null
+++ b/test/test_claude.py
@@ -0,0 +1,190 @@
+import os
+import requests
+# from ..utils import typings as t
+import json
+# class claudeConversation(dict):
+#     def __getitem__(self, index):
+#         conversation_list = super().__getitem__(index)
+#         return "\n\n" + "\n\n".join([f"{item['role']}:{item['content']}" for item in conversation_list]) + "\n\nAssistant:"
+
+# c = claudeConversation()
+# c['1'] = [{'role': 'A', 'content': 'hello'}, {'role': 'B', 'content': 'hi'}]
+# print(repr(c['1']))
+
+import platform
+python_version = list(platform.python_version_tuple())
+SUPPORT_ADD_NOTES = int(python_version[0]) >= 3 and int(python_version[1]) >= 11
+
+class ChatbotError(Exception):
+    """
+    Base class for all Chatbot errors in this Project
+    """
+
+    def __init__(self, *args: object) -> None:
+        if SUPPORT_ADD_NOTES:
+            super().add_note(
+                "Please check that the input is correct, or you can resolve this issue by filing an issue",
+            )
+            super().add_note("Project URL: https://github.com/acheong08/ChatGPT")
+        super().__init__(*args)
+
+class APIConnectionError(ChatbotError):
+    """
+    Subclass of ChatbotError
+
+    An exception object thrown when an API connection fails or fails to connect due to network or
+    other miscellaneous reasons
+    """
+
+    def __init__(self, *args: object) -> None:
+        if SUPPORT_ADD_NOTES:
+            super().add_note(
+                "Please check if there is a problem with your network connection",
+            )
+        super().__init__(*args)
+
+class claudeConversation(dict):
+    def Conversation(self, index):
+        conversation_list = super().__getitem__(index)
+        return "\n\n" + "\n\n".join([f"{item['role']}:{item['content']}" for item in conversation_list]) + "\n\nAssistant:"
+
+
+class claudebot:
+    def __init__(
+        self,
+        api_key: str,
+        engine: str = os.environ.get("GPT_ENGINE") or "claude-2.1",
+        temperature: float = 0.5,
+        top_p: float = 0.7,
+        chat_url: str = "https://api.anthropic.com/v1/complete",
+        timeout: float = None,
+    ):
+        self.api_key: str = api_key
+        self.engine: str = engine
+        self.temperature = temperature
+        self.top_p = top_p
+        self.chat_url = chat_url
+        self.timeout = timeout
+        self.session = requests.Session()
+        self.conversation = claudeConversation()
+
+    def add_to_conversation(
+        self,
+        message: str,
+        role: str,
+        convo_id: str = "default",
+
+    ) -> None:
+        """
+        Add a message to the conversation
+        """
+        self.conversation[convo_id].append({"role": role, "content": message})
+
+    def reset(self, convo_id: str = "default") -> None:
+        """
+        Reset the conversation
+        """
+        self.conversation[convo_id] = list()
+
+    def __truncate_conversation(self, convo_id: str = "default") -> None:
+        """
+        Truncate the conversation
+        """
+        while True:
+            if (
+                self.get_token_count(convo_id) > self.truncate_limit
+                and len(self.conversation[convo_id]) > 1
+            ):
+                # Don't remove the first message
+                self.conversation[convo_id].pop(1)
+            else:
+                break
+
+    def get_token_count(self, convo_id: str = "default") -> int:
+        """
+        Get token count
+        """
+        if self.engine not in ENGINES:
+            raise NotImplementedError(
+                f"Engine {self.engine} is not supported. Select from {ENGINES}",
+            )
+        tiktoken.model.MODEL_TO_ENCODING["gpt-4"] = "cl100k_base"
+        tiktoken.model.MODEL_TO_ENCODING["claude-2-web"] = "cl100k_base"
+        tiktoken.model.MODEL_TO_ENCODING["claude-2"] = "cl100k_base"
+
+        encoding = tiktoken.encoding_for_model(self.engine)
+
+        num_tokens = 0
+        for message in self.conversation[convo_id]:
+            # every message follows {role/name}\n{content}\n
+            num_tokens += 5
+            for key, value in message.items():
+                if value:
+                    num_tokens += len(encoding.encode(value))
+                if key == "name":  # if there's a name, the role is omitted
+                    num_tokens += 5  # role is always required and always 1 token
+        num_tokens += 5  # every reply is primed with assistant
+        return num_tokens
+
+    def ask_stream(
+        self,
+        prompt: str,
+        role: str = "Human",
+        convo_id: str = "default",
+        model: str = None,
+        pass_history: bool = True,
+        model_max_tokens: int = 4096,
+        **kwargs,
+    ):
+        if convo_id not in self.conversation or pass_history == False:
+            self.reset(convo_id=convo_id)
+        self.add_to_conversation(prompt, role, convo_id=convo_id)
+        # self.__truncate_conversation(convo_id=convo_id)
+        # print(self.conversation[convo_id])
+
+        url = self.chat_url
+        headers = {
+            "accept": "application/json",
+            "anthropic-version": "2023-06-01",
+            "content-type": "application/json",
+            "x-api-key": f"{kwargs.get('api_key', self.api_key)}",
+        }
+
+        json_post = {
+            "model": os.environ.get("MODEL_NAME") or model or self.engine,
+            "prompt": self.conversation.Conversation(convo_id) if pass_history else f"\n\nHuman:{prompt}\n\nAssistant:",
+            "stream": True,
+            "temperature": kwargs.get("temperature", self.temperature),
+            "top_p": kwargs.get("top_p", self.top_p),
+            "max_tokens_to_sample": model_max_tokens,
+        }
+
+        response = self.session.post(
+            url,
+            headers=headers,
+            json=json_post,
+            timeout=kwargs.get("timeout", self.timeout),
+            stream=True,
+        )
+        if response.status_code != 200:
+            raise BaseException(f"{response.status_code} {response.reason} {response.text}")
+        response_role: str = "Assistant"
+        full_response: str = ""
+        for line in response.iter_lines():
+            if not line or line.decode("utf-8") == "event: completion" or line.decode("utf-8") == "event: ping" or line.decode("utf-8") == "data: {}":
+                continue
+            line = line.decode("utf-8")[6:]
+            # print(line)
+            resp: dict = json.loads(line)
+            content = resp.get("completion")
+            full_response += content
+            yield content
+        self.add_to_conversation(full_response, response_role, convo_id=convo_id)
+        print(repr(self.conversation.Conversation(convo_id)))
+        # print("total tokens:", self.get_token_count(convo_id))
+
+
+bot = claudebot(api_key=os.environ.get("claude_api_key"))
+
+for i in bot.ask_stream("python怎么自定义错误"):
+    print(i, end="")
\ No newline at end of file
diff --git a/chatgpt2api/chatgpt2api.py b/utils/chatgpt2api.py
similarity index 85%
rename from chatgpt2api/chatgpt2api.py
rename to utils/chatgpt2api.py
index b8f6ec18..981ba277 100644
--- a/chatgpt2api/chatgpt2api.py
+++ b/utils/chatgpt2api.py
@@ -55,6 +55,149 @@ def get_filtered_keys_from_object(obj: object, *keys: str) -> Set[str]:
     "claude-2",
 ]
 
+class claudeConversation(dict):
+    def Conversation(self, index):
+        conversation_list = super().__getitem__(index)
+        return "\n\n" + "\n\n".join([f"{item['role']}:{item['content']}" for item in conversation_list]) + "\n\nAssistant:"
+
+
+class claudebot:
+    def __init__(
+        self,
+        api_key: str,
+        engine: str = os.environ.get("GPT_ENGINE") or "claude-2.1",
+        temperature: float = 0.5,
+        top_p: float = 0.7,
+        chat_url: str = "https://api.anthropic.com/v1/complete",
+        timeout: float = None,
+        **kwargs,
+    ):
+        self.api_key: str = api_key
+        self.engine: str = engine
+        self.temperature = temperature
+        self.top_p = top_p
+        self.chat_url = chat_url
+        self.timeout = timeout
+        self.session = requests.Session()
+        self.conversation = claudeConversation()
+
+    def add_to_conversation(
+        self,
+        message: str,
+        role: str,
+        convo_id: str = "default",
+
+    ) -> None:
+        """
+        Add a message to the conversation
+        """
+        self.conversation[convo_id].append({"role": role, "content": message})
+
+    def reset(self, convo_id: str = "default") -> None:
+        """
+        Reset the conversation
+        """
+        self.conversation[convo_id] = list()
+
+    def __truncate_conversation(self, convo_id: str = "default") -> None:
+        """
+        Truncate the conversation
+        """
+        while True:
+            if (
+                self.get_token_count(convo_id) > self.truncate_limit
+                and len(self.conversation[convo_id]) > 1
+            ):
+                # Don't remove the first message
+                self.conversation[convo_id].pop(1)
+            else:
+                break
+
+    def get_token_count(self, convo_id: str = "default") -> int:
+        """
+        Get token count
+        """
+        if self.engine not in ENGINES:
+            raise NotImplementedError(
+                f"Engine {self.engine} is not supported. Select from {ENGINES}",
+            )
+        tiktoken.model.MODEL_TO_ENCODING["gpt-4"] = "cl100k_base"
+        tiktoken.model.MODEL_TO_ENCODING["claude-2-web"] = "cl100k_base"
+        tiktoken.model.MODEL_TO_ENCODING["claude-2"] = "cl100k_base"
+
+        encoding = tiktoken.encoding_for_model(self.engine)
+
+        num_tokens = 0
+        for message in self.conversation[convo_id]:
+            # every message follows {role/name}\n{content}\n
+            num_tokens += 5
+            for key, value in message.items():
+                if value:
+                    num_tokens += len(encoding.encode(value))
+                if key == "name":  # if there's a name, the role is omitted
+                    num_tokens += 5  # role is always required and always 1 token
+        num_tokens += 5  # every reply is primed with assistant
+        return num_tokens
+
+    def ask_stream(
+        self,
+        prompt: str,
+        role: str = "Human",
+        convo_id: str = "default",
+        model: str = None,
+        pass_history: bool = True,
+        model_max_tokens: int = 4096,
+        **kwargs,
+    ):
+        if convo_id not in self.conversation or pass_history == False:
+            self.reset(convo_id=convo_id)
+        self.add_to_conversation(prompt, role, convo_id=convo_id)
+        # self.__truncate_conversation(convo_id=convo_id)
+        # print(self.conversation[convo_id])
+
+        url = self.chat_url
+        headers = {
+            "accept": "application/json",
+            "anthropic-version": "2023-06-01",
+            "content-type": "application/json",
+            "x-api-key": f"{kwargs.get('api_key', self.api_key)}",
+        }
+
+        json_post = {
+            "model": os.environ.get("MODEL_NAME") or model or self.engine,
+            "prompt": self.conversation.Conversation(convo_id) if pass_history else f"\n\nHuman:{prompt}\n\nAssistant:",
+            "stream": True,
+            "temperature": kwargs.get("temperature", self.temperature),
+            "top_p": kwargs.get("top_p", self.top_p),
+            "max_tokens_to_sample": model_max_tokens,
+        }
+
+        response = self.session.post(
+            url,
+            headers=headers,
+            json=json_post,
+            timeout=kwargs.get("timeout", self.timeout),
+            stream=True,
+        )
+        if response.status_code != 200:
+            raise BaseException(f"{response.status_code} {response.reason} {response.text}")
+        response_role: str = "Assistant"
+        full_response: str = ""
+        for line in response.iter_lines():
+            if not line or line.decode("utf-8") == "event: completion" or line.decode("utf-8") == "event: ping" or line.decode("utf-8") == "data: {}":
+                continue
+            line = line.decode("utf-8")[6:]
+            # print(line)
+            resp: dict = json.loads(line)
+            content = resp.get("completion")
+            full_response += content
+            yield content
+        self.add_to_conversation(full_response, response_role, convo_id=convo_id)
+        print(repr(self.conversation.Conversation(convo_id)))
+        # print("total tokens:", self.get_token_count(convo_id))
+
+
+
 class Imagebot:
     def __init__(
         self,
diff --git a/utils/function_call.py b/utils/function_call.py
index 2b83cecc..f0f23b35 100644
--- a/utils/function_call.py
+++ b/utils/function_call.py
@@ -49,24 +49,5 @@
             "required": ["url"]
         }
     },
-    # "web_search": {
-    #     "functions": [
-    #         {
-    #             "name": "get_web_search_results",
-    #             "description": "Get the web page search results in a given keywords",
-    #             "parameters": {
-    #                 "type": "object",
-    #                 "properties": {
-    #                     "keywords": {
-    #                         "type": "string",
-    #                         "description": "keywords that can yield better search results, keywords are connected with spaces, e.g. 1. The keywords of the sentence (How much does the zeabur software service cost per month?) is (zeabur price). 2. The keywords of the sentence (今天的微博热搜有哪些?) is (微博 热搜)"
-    #                     }
-    #                 },
-    #                 "required": ["keywords"]
-    #             }
-    #         }
-    #     ],
-    #     "function_call": "auto"
-    # },
 }
diff --git a/utils/runasync.py b/utils/runasync.py
deleted file mode 100644
index 76096bef..00000000
--- a/utils/runasync.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import asyncio
-import threading
-loop_additional = asyncio.new_event_loop()
-thread_additional = threading.Thread(target=loop_additional.run_forever, name="Async Runner", daemon=True)
-def run_async(coro):
-    if not thread_additional.is_alive():
-        thread_additional.start()
-    future = asyncio.run_coroutine_threadsafe(coro, loop_additional)
-    return future.result()
\ No newline at end of file
diff --git a/chatgpt2api/typings.py b/utils/typings.py
similarity index 100%
rename from chatgpt2api/typings.py
rename to utils/typings.py
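
For anyone trying out this change, here is a minimal usage sketch of the new `claudebot` streaming interface, mirroring `test/test_claude.py` above. It assumes the `claude_api_key` environment variable is set and the repository root is on the import path; the English prompt string is only an illustrative stand-in.

```python
# Minimal sketch of the streaming interface added in utils/chatgpt2api.py
# (mirrors test/test_claude.py). Assumes `claude_api_key` is exported and the
# repository root is on PYTHONPATH; the prompt below is just an example.
import os

from utils.chatgpt2api import claudebot

bot = claudebot(api_key=os.environ.get("claude_api_key"))

# ask_stream() yields completion chunks as they arrive from the Anthropic API.
for chunk in bot.ask_stream("How do I define a custom exception in Python?"):
    print(chunk, end="")
```

Repeated `ask_stream` calls with the same `convo_id` reuse the Human/Assistant history that `claudeConversation.Conversation` flattens into a single prompt string.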