Commit 049837f
Merge pull request #679 from Ikaros-521/owner
Tongyi Qianwen: add common LLM configuration options and a web search feature
Ikaros-521 committed Mar 1, 2024
2 parents f5df072 + 6bedadf commit 049837f
Showing 5 changed files with 40 additions and 2 deletions.
5 changes: 5 additions & 0 deletions config.json
@@ -313,6 +313,11 @@
"model": "qwen-max",
"preset": "你是一个专业的虚拟主播",
"api_key": "",
"temperature": 0.9,
"top_p": 1.0,
"top_k": 3,
"enable_search": true,
"max_tokens": 4096,
"history_enable": true,
"history_max_len": 300
},
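For reference, a hedged summary of what the five new keys control, paraphrasing the placeholder texts this commit adds to webui.py further down (the values are the defaults shipped in this config.json):

# New Tongyi Qianwen (DashScope) generation options added by this commit.
# The comments paraphrase the webui.py placeholder descriptions below; treat
# them as a summary, not authoritative SDK documentation.
TONGYI_DEFAULTS = {
    "temperature": 0.9,     # randomness of the output; higher means more varied replies
    "top_p": 1.0,           # nucleus sampling: max cumulative probability of candidate tokens
    "top_k": 3,             # top-k sampling: max number of candidate tokens considered
    "enable_search": True,  # allow the model to consult web search when answering
    "max_tokens": 4096,     # upper bound on the number of generated tokens
}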
5 changes: 5 additions & 0 deletions config.json.bak
@@ -313,6 +313,11 @@
"model": "qwen-max",
"preset": "你是一个专业的虚拟主播",
"api_key": "",
"temperature": 0.9,
"top_p": 1.0,
"top_k": 3,
"enable_search": true,
"max_tokens": 4096,
"history_enable": true,
"history_max_len": 300
},
10 changes: 10 additions & 0 deletions tests/test_tongyi/tongyi.py
@@ -88,6 +88,11 @@ def get_resp(self, prompt):
self.config_data['model'],
messages=messages,
result_format='message', # set the result to be "message" format.
temperature=self.config_data['temperature'],
top_p=self.config_data['top_p'],
top_k=self.config_data['top_k'],
enable_search=self.config_data['enable_search'],
max_tokens=self.config_data['max_tokens'],
)
if response.status_code == HTTPStatus.OK:
logging.debug(response)
@@ -131,6 +136,11 @@ def get_resp(self, prompt):
"model": "qwen-max",
"preset": "你是一个专业的虚拟主播",
"api_key": "sk-",
"temperature": 0.9,
"top_p": 0.9,
"top_k": 3,
"enable_search": True,
"max_tokens": 1024,
"history_enable": True,
"history_max_len": 20,
}
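To see the whole request in one place, here is a minimal, self-contained sketch of the call this test exercises, assuming the dashscope SDK is installed and the placeholder API key is replaced with a real one; the parameter values mirror the test config above:

from http import HTTPStatus

import dashscope
from dashscope import Generation

dashscope.api_key = "sk-"  # placeholder key, as in the test config above


def ask_qwen(prompt):
    """One-shot Tongyi Qianwen request using the options this commit wires up."""
    response = Generation.call(
        "qwen-max",
        messages=[
            {"role": "system", "content": "你是一个专业的虚拟主播"},
            {"role": "user", "content": prompt},
        ],
        result_format="message",  # return choices in chat-message format
        temperature=0.9,
        top_p=0.9,
        top_k=3,
        enable_search=True,  # the new web-search toggle
        max_tokens=1024,
    )
    if response.status_code == HTTPStatus.OK:
        # message-format responses carry the text under output.choices
        return response.output.choices[0]["message"]["content"]
    return None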
10 changes: 10 additions & 0 deletions utils/gpt_model/tongyi.py
@@ -88,6 +88,11 @@ def get_resp(self, prompt):
self.config_data['model'],
messages=messages,
result_format='message', # set the result to be "message" format.
temperature=self.config_data['temperature'],
top_p=self.config_data['top_p'],
top_k=self.config_data['top_k'],
enable_search=self.config_data['enable_search'],
max_tokens=self.config_data['max_tokens'],
)
if response.status_code == HTTPStatus.OK:
logging.debug(response)
@@ -131,6 +136,11 @@ def get_resp(self, prompt):
"model": "qwen-max",
"preset": "你是一个专业的虚拟主播",
"api_key": "sk-",
"temperature": 0.9,
"top_p": 0.9,
"top_k": 3,
"enable_search": True,
"max_tokens": 1024,
"history_enable": True,
"history_max_len": 20,
}
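One caveat worth flagging: the direct self.config_data['temperature'] lookups above raise KeyError when an existing config.json predates this commit. A hypothetical defensive variant (not what the diff does) would fall back to the defaults this commit ships:

def tongyi_sampling_kwargs(config_data):
    """Hypothetical helper: gather the new options, falling back to the
    defaults in this commit's config.json when a key is missing."""
    return {
        "temperature": config_data.get("temperature", 0.9),
        "top_p": config_data.get("top_p", 1.0),
        "top_k": config_data.get("top_k", 3),
        "enable_search": config_data.get("enable_search", True),
        "max_tokens": config_data.get("max_tokens", 4096),
    }

It would slot into the call above as Generation.call(self.config_data['model'], messages=messages, result_format='message', **tongyi_sampling_kwargs(self.config_data)).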
12 changes: 10 additions & 2 deletions webui.py
@@ -1168,6 +1168,10 @@ def common_textarea_handle(content):
config_data["tongyi"]["api_key"] = input_tongyi_api_key.value
config_data["tongyi"]["model"] = select_tongyi_model.value
config_data["tongyi"]["preset"] = input_tongyi_preset.value
config_data["tongyi"]["temperature"] = round(float(input_tongyi_temperature.value), 2)
config_data["tongyi"]["top_p"] = round(float(input_tongyi_top_p.value), 2)
config_data["tongyi"]["top_k"] = int(input_tongyi_top_k.value)
config_data["tongyi"]["enable_search"] = switch_tongyi_enable_search.value
config_data["tongyi"]["history_enable"] = switch_tongyi_history_enable.value
config_data["tongyi"]["history_max_len"] = int(input_tongyi_history_max_len.value)
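Note that the bare float() and int() casts just above raise ValueError when a user types a non-numeric value into these fields. A hedged sketch of a more forgiving coercion helper (hypothetical; the commit itself uses the bare casts):

def to_float(raw, default, ndigits=2):
    """Coerce a text-field value to a rounded float; keep the default on bad input."""
    try:
        return round(float(raw), ndigits)
    except (TypeError, ValueError):
        return default


def to_int(raw, default):
    """Coerce a text-field value to an int; keep the default on bad input."""
    try:
        return int(raw)
    except (TypeError, ValueError):
        return default

# Usage, e.g.: config_data["tongyi"]["temperature"] = to_float(input_tongyi_temperature.value, 0.9)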

@@ -2800,8 +2804,12 @@ def common_textarea_handle(content):
value=config.get("tongyi", "model")
).style("width:150px")
input_tongyi_api_key = ui.input(label='密钥', value=config.get("tongyi", "api_key"), placeholder='API类型下,DashScope平台申请的API密钥')
- input_tongyi_preset = ui.input(label='预设', placeholder='API类型下,用于指定一组预定义的设置,以便模型更好地适应特定的对话场景。', value=config.get("tongyi", "preset")).style("width:600px")
+ input_tongyi_preset = ui.input(label='预设', placeholder='API类型下,用于指定一组预定义的设置,以便模型更好地适应特定的对话场景。', value=config.get("tongyi", "preset")).style("width:500px")
+ input_tongyi_temperature = ui.input(label='temperature', value=config.get("tongyi", "temperature"), placeholder='控制输出的随机性。').style("width:100px")
+ input_tongyi_top_p = ui.input(label='top_p', value=config.get("tongyi", "top_p"), placeholder='在抽样时考虑的标记的最大累积概率。根据其分配的概率对标记进行排序,以仅考虑最可能的标记。Top-k采样直接限制要考虑的标记的最大数量,而Nucleus采样则基于累积概率限制标记的数量。').style("width:100px")
+ input_tongyi_top_k = ui.input(label='top_k', value=config.get("tongyi", "top_k"), placeholder='在抽样时考虑的标记的最大数量。Top-k采样考虑一组top_k最有可能的标记。默认值为40。').style("width:100px")
+ switch_tongyi_enable_search = ui.switch('联网搜索', value=config.get("tongyi", "enable_search")).style(switch_internal_css)

with ui.row():
switch_tongyi_history_enable = ui.switch('上下文记忆', value=config.get("tongyi", "history_enable")).style(switch_internal_css)
input_tongyi_history_max_len = ui.input(label='最大记忆长度', value=config.get("tongyi", "history_max_len"), placeholder='最长能记忆的问答字符串长度,超长会丢弃最早记忆的内容,请慎用!配置过大可能会有丢大米')
