Skip to content

Commit

Permalink
新增平台 大象新闻
Browse files Browse the repository at this point in the history
  • Loading branch information
Ikaros-521 committed Apr 18, 2024
1 parent f82e86b commit f47e1e6
Show file tree
Hide file tree
Showing 3 changed files with 44 additions and 3 deletions.
4 changes: 3 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -231,4 +231,6 @@ out/copy*/*.wav

tests/test_faster_whisper/*.wav

out/图像识别/*.png
out/图像识别/*.png

main copy.py
38 changes: 38 additions & 0 deletions main.py
Original file line number Diff line number Diff line change
Expand Up @@ -2595,6 +2595,44 @@ def send():
finally:
logging.warning('关闭连接...')
os._exit(0)
elif platform == "hntv":
import requests

# 初始化已获取的commentId集合
comment_set = set()

def fetch_comments():
    """Poll the hntv (大象新闻) comment API once and forward new comments.

    Reads the room id from the outer-scope ``my_handle`` and de-duplicates
    by ``commentId`` via the outer-scope ``comment_set``, so each comment
    is processed at most once across repeated polls. Any failure is logged
    and reported through ``my_handle.abnormal_alarm_handle`` instead of
    propagating, keeping the polling loop alive.
    """
    try:
        url = f'https://pubmod.hntv.tv/dx-bridge/get-comment-with-article-super-v2?limit=40&typeId=1&appFusionId=1390195608019869697&page=1&objectId={my_handle.get_room_id()}'
        # Fix: a timeout is required — without it a stalled server would
        # block this call (and the enclosing 3s polling loop) indefinitely.
        response = requests.get(url, timeout=10)
        if response.status_code == 200:
            # Fix: keep the parsed response under its own name instead of
            # reusing `data`, which is also the per-comment payload below.
            resp_json = response.json()
            items = resp_json.get('result', {}).get('items', [])
            for item in items:
                comment_id = item.get('commentId')
                if comment_id not in comment_set:
                    comment_set.add(comment_id)
                    username = item.get('commentUserNickname', '')
                    content = item.get('content', '')

                    logging.info(f"[{username}]: {content}")

                    data = {
                        "platform": platform,
                        "username": username,
                        "content": content
                    }

                    my_handle.process_data(data, "comment")
        else:
            logging.error("获取弹幕数据失败。。。")
    except Exception:
        logging.error(traceback.format_exc())
        my_handle.abnormal_alarm_handle("platform")

while True:
fetch_comments()
time.sleep(3) # 每隔3秒轮询一次
elif platform == "talk":
thread.join()

Expand Down
5 changes: 3 additions & 2 deletions webui.py
Original file line number Diff line number Diff line change
Expand Up @@ -2598,6 +2598,7 @@ def save_config():
'youtube': 'YouTube',
'twitch': 'twitch',
'tiktok': 'tiktok',
'hntv': '大象新闻',
},
value=config.get("platform")
).style("width:200px;")
Expand Down Expand Up @@ -3224,8 +3225,8 @@ def save_config():
clearable=True
)
input_chatgpt_temperature = ui.input(label='温度', placeholder='控制生成文本的随机性。较高的温度值会使生成的文本更随机和多样化,而较低的温度值会使生成的文本更加确定和一致。', value=config.get("chatgpt", "temperature")).style("width:200px;")
input_chatgpt_max_tokens = ui.input(label='最大令牌数', placeholder='限制生成回答的最大长度。', value=config.get("chatgpt", "max_tokens")).style("width:200px;")
input_chatgpt_top_p = ui.input(label='前p个选择', placeholder='Nucleus采样。这个参数控制模型从累积概率大于一定阈值的令牌中进行采样。较高的值会产生更多的多样性,较低的值会产生更少但更确定的回答。', value=config.get("chatgpt", "top_p")).style("width:200px;")
input_chatgpt_max_tokens = ui.input(label='最大token数', placeholder='限制生成回答的最大长度。', value=config.get("chatgpt", "max_tokens")).style("width:200px;")
input_chatgpt_top_p = ui.input(label='top_p', placeholder='Nucleus采样。这个参数控制模型从累积概率大于一定阈值的令牌中进行采样。较高的值会产生更多的多样性,较低的值会产生更少但更确定的回答。', value=config.get("chatgpt", "top_p")).style("width:200px;")
with ui.row():
input_chatgpt_presence_penalty = ui.input(label='存在惩罚', placeholder='控制模型生成回答时对给定问题提示的关注程度。较高的存在惩罚值会减少模型对给定提示的重复程度,鼓励模型更自主地生成回答。', value=config.get("chatgpt", "presence_penalty")).style("width:200px;")
input_chatgpt_frequency_penalty = ui.input(label='频率惩罚', placeholder='控制生成回答时对已经出现过的令牌的惩罚程度。较高的频率惩罚值会减少模型生成已经频繁出现的令牌,以避免重复和过度使用特定词语。', value=config.get("chatgpt", "frequency_penalty")).style("width:200px;")
Expand Down

0 comments on commit f47e1e6

Please sign in to comment.