diff --git a/config.json b/config.json
index e00cb405..c3210f64 100644
--- a/config.json
+++ b/config.json
@@ -351,9 +351,11 @@
         "history_max_len": 300
     },
     "my_wenxinworkshop": {
+        "type": "千帆大模型",
         "model": "ERNIEBot",
         "api_key": "",
         "secret_key": "",
+        "app_token": "",
         "top_p": 0.8,
         "temperature": 0.9,
         "penalty_score": 1.0,
diff --git a/config.json.bak b/config.json.bak
index e00cb405..c3210f64 100644
--- a/config.json.bak
+++ b/config.json.bak
@@ -351,9 +351,11 @@
         "history_max_len": 300
     },
     "my_wenxinworkshop": {
+        "type": "千帆大模型",
         "model": "ERNIEBot",
         "api_key": "",
         "secret_key": "",
+        "app_token": "",
         "top_p": 0.8,
         "temperature": 0.9,
         "penalty_score": 1.0,
diff --git "a/docs/\346\212\225\350\265\204\344\272\272/index.html" "b/docs/\346\212\225\350\265\204\344\272\272/index.html"
index d0485240..3fb06b7a 100644
--- "a/docs/\346\212\225\350\265\204\344\272\272/index.html"
+++ "b/docs/\346\212\225\350\265\204\344\272\272/index.html"
@@ -286,6 +286,11 @@
                 avatar: "https://images.cnblogs.com/cnblogs_com/ikaros-521/2328032/o_240219073301_QQ%E5%9B%BE%E7%89%8720240219153230.jpg",
                 amount: "¥100"
             },
+            {
+                name: "不醉不归",
+                avatar: "https://images.cnblogs.com/cnblogs_com/ikaros-521/2328032/o_240320155329_QQ%E5%9B%BE%E7%89%8720240320235305.jpg",
+                amount: "¥100"
+            },
             {
                 name: "很养眼的丑男",
                 avatar: "https://images.cnblogs.com/cnblogs_com/ikaros-521/2328032/o_230722162732_QQ%E5%9B%BE%E7%89%8720230723000544.jpg",
diff --git "a/docs/\346\212\225\350\265\204\344\272\272/invest.png" "b/docs/\346\212\225\350\265\204\344\272\272/invest.png"
index 28c77e7d..e2c7b3fb 100644
Binary files "a/docs/\346\212\225\350\265\204\344\272\272/invest.png" and "b/docs/\346\212\225\350\265\204\344\272\272/invest.png" differ
diff --git a/tests/test_blivedm/2.py b/tests/test_blivedm/2.py
index a848063a..f5519c85 100644
--- a/tests/test_blivedm/2.py
+++ b/tests/test_blivedm/2.py
@@ -5,13 +5,20 @@
 import blivedm.models.open_live as open_models
 import blivedm.models.web as web_models
 
+config_json = {
+    "ACCESS_KEY_ID": "",
+    "ACCESS_KEY_SECRET": "",
+    "APP_ID": 0,
+    "ROOM_OWNER_AUTH_CODE": ""
+}
+
 # 在开放平台申请的开发者密钥
-ACCESS_KEY_ID = ''
-ACCESS_KEY_SECRET = ''
+ACCESS_KEY_ID = config_json["ACCESS_KEY_ID"]
+ACCESS_KEY_SECRET = config_json["ACCESS_KEY_SECRET"]
 # 在开放平台创建的项目ID
-APP_ID = 0
+APP_ID = config_json["APP_ID"]
 # 主播身份码
-ROOM_OWNER_AUTH_CODE = ''
+ROOM_OWNER_AUTH_CODE = config_json["ROOM_OWNER_AUTH_CODE"]
 
 async def main():
diff --git a/utils/gpt_model/my_wenxinworkshop.py b/utils/gpt_model/my_wenxinworkshop.py
index f459eabc..1e478afa 100644
--- a/utils/gpt_model/my_wenxinworkshop.py
+++ b/utils/gpt_model/my_wenxinworkshop.py
@@ -1,26 +1,58 @@
 import json, logging, traceback
-from wenxinworkshop import LLMAPI, EmbeddingAPI, PromptTemplateAPI
+from wenxinworkshop import LLMAPI, AppBuilderAPI, EmbeddingAPI, PromptTemplateAPI
 from wenxinworkshop import Message, Messages, Texts
 
+from utils.common import Common
+from utils.logger import Configure_logger
+
 # 前往官网:https://cloud.baidu.com/product/wenxinworkshop 申请服务获取
 class My_WenXinWorkShop:
     def __init__(self, data):
-        # self.common = Common()
-        # # 日志文件路径
-        # file_path = "./log/log-" + self.common.get_bj_time(1) + ".txt"
-        # Configure_logger(file_path)
+        self.common = Common()
+        # 日志文件路径
+        file_path = "./log/log-" + self.common.get_bj_time(1) + ".txt"
+        Configure_logger(file_path)
 
         self.config_data = data
         self.history = []
+        self.my_bot = None
+
+        logging.debug(self.config_data)
+
 
         try:
-            # create a LLM API
-            self.my_bot = LLMAPI(
-                api_key=self.config_data["api_key"],
-                secret_key=self.config_data["secret_key"],
-                url=LLMAPI.ERNIEBot
-            )
+            if self.config_data['type'] == "千帆大模型":
+                model_url_map = {
+                    "ERNIEBot": LLMAPI.ERNIEBot,
+                    "ERNIEBot_turbo": LLMAPI.ERNIEBot_turbo,
+                    "ERNIEBot_4_0": LLMAPI.ERNIEBot_4_0,
+                    "BLOOMZ_7B": LLMAPI.BLOOMZ_7B,
+                    "LLAMA_2_7B": LLMAPI.LLAMA_2_7B,
+                    "LLAMA_2_13B": LLMAPI.LLAMA_2_13B,
+                    "LLAMA_2_70B": LLMAPI.LLAMA_2_70B,
+                    "QIANFAN_BLOOMZ_7B_COMPRESSED": LLMAPI.QIANFAN_BLOOMZ_7B_COMPRESSED,
+                    "QIANFAN_CHINESE_LLAMA_2_7B": LLMAPI.QIANFAN_CHINESE_LLAMA_2_7B,
+                    "CHATGLM2_6B_32K": LLMAPI.CHATGLM2_6B_32K,
+                    "AQUILACHAT_7B": LLMAPI.AQUILACHAT_7B,
+                    "ERNIE_BOT_8K": LLMAPI.ERNIE_BOT_8K,
+                    "CODELLAMA_7B_INSTRUCT": LLMAPI.CODELLAMA_7B_INSTRUCT,
+                    "XUANYUAN_70B_CHAT": LLMAPI.XUANYUAN_70B_CHAT,
+                    "CHATLAW": LLMAPI.CHATLAW,
+                }
+
+                selected_model = self.config_data["model"]
+                if selected_model in model_url_map:
+                    self.my_bot = LLMAPI(
+                        api_key=self.config_data["api_key"],
+                        secret_key=self.config_data["secret_key"],
+                        url=model_url_map[selected_model]
+                    )
+            elif self.config_data['type'] == "AppBuilder":
+                self.my_bot = AppBuilderAPI(
+                    app_token=self.config_data["app_token"],
+                    history_enable=self.config_data["history_enable"]
+                )
         except Exception as e:
             logging.error(traceback.format_exc())
 
@@ -36,48 +68,54 @@ def get_resp(self, prompt):
         """
             str: 返回的文本回答
         """
         try:
-            # create messages
-            messages: Messages = []
-
-            for history in self.history:
+            if self.config_data['type'] == "千帆大模型":
+                # create messages
+                messages: Messages = []
+
+                for history in self.history:
+                    messages.append(Message(
+                        role=history["role"],
+                        content=history["content"]
+                    ))
+
                 messages.append(Message(
-                    role=history["role"],
-                    content=history["content"]
+                    role='user',
+                    content=prompt
                 ))
-            messages.append(Message(
-                role='user',
-                content=prompt
-            ))
-
-            logging.info(f"self.history={self.history}")
-
-            # get response from LLM API
-            resp_content = self.my_bot(
-                messages=messages,
-                temperature=self.config_data["temperature"],
-                top_p=self.config_data["top_p"],
-                penalty_score=self.config_data["penalty_score"],
-                stream=None,
-                user_id=None,
-                chunk_size=512
-            )
-
-            # 启用历史就给我记住!
-            if self.config_data["history_enable"]:
-                while True:
-                    # 获取嵌套列表中所有字符串的字符数
-                    total_chars = sum(len(item['content']) for item in self.history if 'content' in item)
-                    # 如果大于限定最大历史数,就剔除第一个元素
-                    if total_chars > self.config_data["history_max_len"]:
-                        self.history.pop(0)
-                        self.history.pop(0)
-                    else:
-                        # self.history.pop()
-                        self.history.append({"role": "user", "content": prompt})
-                        self.history.append({"role": "assistant", "content": resp_content})
-                        break
-
+                logging.info(f"self.history={self.history}")
+
+                # get response from LLM API
+                resp_content = self.my_bot(
+                    messages=messages,
+                    temperature=self.config_data["temperature"],
+                    top_p=self.config_data["top_p"],
+                    penalty_score=self.config_data["penalty_score"],
+                    stream=None,
+                    user_id=None,
+                    chunk_size=512
+                )
+
+                # 启用历史就给我记住!
+                if self.config_data["history_enable"]:
+                    while True:
+                        # 获取嵌套列表中所有字符串的字符数
+                        total_chars = sum(len(item['content']) for item in self.history if 'content' in item)
+                        # 如果大于限定最大历史数,就剔除第一个元素
+                        if total_chars > self.config_data["history_max_len"]:
+                            self.history.pop(0)
+                            self.history.pop(0)
+                        else:
+                            # self.history.pop()
+                            self.history.append({"role": "user", "content": prompt})
+                            self.history.append({"role": "assistant", "content": resp_content})
+                            break
+            elif self.config_data['type'] == "AppBuilder":
+                resp_content = self.my_bot(
+                    query=prompt,
+                    response_mode="blocking"
+                )
+
             return resp_content
         except Exception as e:
diff --git a/utils/my_handle.py b/utils/my_handle.py
index e8e2e60e..58b21183 100644
--- a/utils/my_handle.py
+++ b/utils/my_handle.py
@@ -1224,7 +1224,8 @@ def llm_handle(self, chat_type, data, type="chat"):
         # 使用字典映射的方式来获取响应内容
         resp_content = chat_model_methods.get(chat_type, lambda: data["content"])()
 
-        resp_content = resp_content.strip()
+        if resp_content is not None:
+            resp_content = resp_content.strip()
 
         logging.debug(f"resp_content={resp_content}")
 
diff --git a/webui.py b/webui.py
index a108fcc8..1a1bc8aa 100644
--- a/webui.py
+++ b/webui.py
@@ -1432,6 +1432,7 @@ def common_textarea_handle(content):
            # config_data["my_qianfan"]["history_max_len"] = int(input_my_qianfan_history_max_len.value)
 
        if config.get("webui", "show_card", "llm", "my_wenxinworkshop"):
+            config_data["my_wenxinworkshop"]["type"] = select_my_wenxinworkshop_type.value
            config_data["my_wenxinworkshop"]["model"] = select_my_wenxinworkshop_model.value
            config_data["my_wenxinworkshop"]["api_key"] = input_my_wenxinworkshop_api_key.value
            config_data["my_wenxinworkshop"]["secret_key"] = input_my_wenxinworkshop_secret_key.value
@@ -1441,6 +1442,8 @@ def common_textarea_handle(content):
            config_data["my_wenxinworkshop"]["history_enable"] = switch_my_wenxinworkshop_history_enable.value
            config_data["my_wenxinworkshop"]["history_max_len"] = int(input_my_wenxinworkshop_history_max_len.value)
 
+            config_data["my_wenxinworkshop"]["app_token"] = input_my_wenxinworkshop_app_token.value
+
        if config.get("webui", "show_card", "llm", "gemini"):
            config_data["gemini"]["api_key"] = input_gemini_api_key.value
            config_data["gemini"]["model"] = select_gemini_model.value
@@ -2567,35 +2570,24 @@ def common_textarea_handle(content):
        chatgpt_models = [
            "gpt-3.5-turbo",
            "gpt-3.5-turbo-instruct",
-            "gpt-3.5-turbo-instruct-0914",
            "gpt-3.5-turbo-0301",
            "gpt-3.5-turbo-1106",
            "gpt-3.5-turbo-0125",
            "gpt-3.5-turbo-16k",
-            "gpt-3.5-turbo-16k-0613",
-            "gpt-3.5-turbo-instruct",
-            "gpt-3.5-turbo-instruct-0914",
            "gpt-4",
            "gpt-4-turbo-preview",
-            "gpt-4-0613",
            "gpt-4-32k",
-            "gpt-4-32k-0613",
            "gpt-4-1106-preview",
            "gpt-4-0125-preview",
            "text-embedding-3-large",
            "text-embedding-3-small",
            "text-davinci-003",
-            "text-davinci-002",
-            "text-curie-001",
-            "text-babbage-001",
-            "text-ada-001",
-            "text-moderation-latest",
-            "text-moderation-stable",
            "rwkv",
            "chatglm3-6b",
            "moonshot-v1-8k",
            "gemma:2b",
-            "qwen"
+            "qwen",
+            "qwen:1.8b-chat"
        ]
        data_json = {}
        for line in chatgpt_models:
@@ -2979,6 +2971,15 @@ def common_textarea_handle(content):
        if config.get("webui", "show_card", "llm", "my_wenxinworkshop"):
            with ui.card().style(card_css):
                ui.label("千帆大模型")
+                with ui.row():
+                    select_my_wenxinworkshop_type = ui.select(
+                        label='类型',
+                        options={"千帆大模型": "千帆大模型", "AppBuilder": "AppBuilder"},
+                        value=config.get("my_wenxinworkshop", "type")
+                    ).style("width:150px")
+                    switch_my_wenxinworkshop_history_enable = ui.switch('上下文记忆', value=config.get("my_wenxinworkshop", "history_enable")).style(switch_internal_css)
"history_enable")).style(switch_internal_css) + input_my_wenxinworkshop_history_max_len = ui.input(label='最大记忆长度', value=config.get("my_wenxinworkshop", "history_max_len"), placeholder='最长能记忆的问答字符串长度,超长会丢弃最早记忆的内容,请慎用!配置过大可能会有丢大米') + with ui.row(): input_my_wenxinworkshop_api_key = ui.input(label='api_key', value=config.get("my_wenxinworkshop", "api_key"), placeholder='千帆大模型平台,开通对应服务。应用接入-创建应用,填入api key') input_my_wenxinworkshop_secret_key = ui.input(label='secret_key', value=config.get("my_wenxinworkshop", "secret_key"), placeholder='千帆大模型平台,开通对应服务。应用接入-创建应用,填入secret key') @@ -3009,13 +3011,15 @@ def common_textarea_handle(content): options=data_json, value=config.get("my_wenxinworkshop", "model") ).style("width:150px") - switch_my_wenxinworkshop_history_enable = ui.switch('上下文记忆', value=config.get("my_wenxinworkshop", "history_enable")).style(switch_internal_css) - input_my_wenxinworkshop_history_max_len = ui.input(label='最大记忆长度', value=config.get("my_wenxinworkshop", "history_max_len"), placeholder='最长能记忆的问答字符串长度,超长会丢弃最早记忆的内容,请慎用!配置过大可能会有丢大米') - with ui.row(): + input_my_wenxinworkshop_temperature = ui.input(label='温度', value=config.get("my_wenxinworkshop", "temperature"), placeholder='(0, 1.0] 控制生成文本的随机性。较高的温度值会使生成的文本更随机和多样化,而较低的温度值会使生成的文本更加确定和一致。').style("width:200px;") input_my_wenxinworkshop_top_p = ui.input(label='前p个选择', value=config.get("my_wenxinworkshop", "top_p"), placeholder='[0, 1.0] Nucleus采样。这个参数控制模型从累积概率大于一定阈值的令牌中进行采样。较高的值会产生更多的多样性,较低的值会产生更少但更确定的回答。').style("width:200px;") input_my_wenxinworkshop_penalty_score = ui.input(label='惩罚得分', value=config.get("my_wenxinworkshop", "penalty_score"), placeholder='[1.0, 2.0] 在生成文本时对某些词语或模式施加的惩罚。这是一种调节生成内容的机制,用来减少或避免不希望出现的内容。').style("width:200px;") + with ui.row(): + input_my_wenxinworkshop_app_token = ui.input(label='app_token', value=config.get("my_wenxinworkshop", "app_token"), placeholder='千帆AppBuilder平台,我的应用-应用配置-发布详情-我的Agent应用-API调用,填入app_token').style("width:200px;") + + # with ui.card().style(card_css): # ui.label("千帆大模型(兼容问题暂不启用)") # with ui.row():