forked from irisrumtub/GPT-Twitch-Chatbot
-
Notifications
You must be signed in to change notification settings - Fork 0
/
messageHandler.js
119 lines (108 loc) · 3.75 KB
/
messageHandler.js
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
/**
 * Wires a Twitch chat listener that forwards "!ai <prompt>" messages to the
 * OpenAI completions API and relays the response back into chat.
 *
 * Reads UI state from the page: #timeout (cooldown in seconds), #tokens
 * (max_tokens, default 200), #aiKey (OpenAI API key), and writes status
 * markup into #aiStatus.
 *
 * @param {object} TwitchClient - connected tmi.js-style client exposing
 *   `.on("message", cb)` and `.say(channel, text)`.
 */
const handleMessage = (TwitchClient) => {
  const fs = require("fs");
  const axios = require("axios");
  const OPENAI_ENDPOINT = "https://api.openai.com/v1/completions";
  // NOTE(review): text-davinci-003 is a legacy model that OpenAI has retired;
  // migrating to the chat completions API/models is recommended.
  const MODEL = "text-davinci-003";
  const lastGPTResponse = document.getElementById("aiStatus");
  let onCooldown = false;

  // Read a numeric <input> by id; fall back to `fallback` when the value is
  // not a number or is not greater than `min`.
  const readNumberInput = (id, min, fallback) => {
    const value = Number(document.getElementById(id).value);
    return Number.isNaN(value) || value <= min ? fallback : value;
  };

  // Split `text` on spaces into chunks that fit Twitch's ~500-char limit.
  const splitIntoChunks = (text, limit = 500) => {
    const chunks = [];
    let current = "";
    for (const word of text.split(" ")) {
      if (current.length + word.length + 1 > limit) {
        chunks.push(current);
        current = "";
      }
      current += `${word} `;
    }
    chunks.push(current);
    return chunks;
  };

  TwitchClient.on("message", (channel, tags, message, self) => {
    if (self) return; // ignore the bot's own messages
    if (!message.startsWith("!ai")) return; // only handle the !ai command
    if (onCooldown) return; // respect the cooldown window

    const timeout = readNumberInput("timeout", 0, 0); // cooldown seconds

    // Everything after the "!ai " command token is the prompt.
    const twitchMessage = message.substring(message.indexOf(" ") + 1);

    // Optional word filter: stoplist.csv is a comma-separated banlist.
    try {
      const csv = fs.readFileSync("stoplist.csv", "utf8").split(",");
      if (csv[csv.length - 1] === "") {
        csv.pop(); // drop the empty entry a trailing comma produces
      }
      if (csv.some((value) => twitchMessage.includes(value))) {
        TwitchClient.say(channel, "the message contains a naughty word.");
        return;
      }
    } catch (error) {
      // A missing stoplist.csv is fine — filtering is opt-in.
      console.warn("No word filter found!");
    }

    const tokens = readNumberInput("tokens", 10, 200); // max_tokens
    const aiKey = document.getElementById("aiKey").value;

    axios
      .post(
        OPENAI_ENDPOINT,
        {
          prompt: twitchMessage,
          model: MODEL,
          max_tokens: tokens,
          n: 1,
          temperature: 0.9,
        },
        {
          headers: {
            "Content-Type": "application/json",
            Authorization: `Bearer ${aiKey}`,
          },
        }
      )
      .then((response) => {
        // The completed text is in `response.data.choices[0].text`.
        const result = response.data.choices[0].text;
        console.log(response);
        lastGPTResponse.innerHTML =
          "<p style='color:green'>AI is working as intended</p>";

        const messages = splitIntoChunks(result);

        // BUG FIX: the recursive setTimeout was commented out, so only the
        // first chunk of a long response ever reached chat. Re-enabled with
        // a 1s gap between chunks to stay clear of Twitch rate limits.
        let messageIndex = 0;
        const sendNextMessage = () => {
          if (messageIndex < messages.length) {
            TwitchClient.say(channel, "Response from AI:" + messages[messageIndex]);
            messageIndex++;
            setTimeout(sendNextMessage, 1000);
          }
        };
        sendNextMessage();

        onCooldown = true;
        setTimeout(() => {
          onCooldown = false;
        }, timeout * 1000);
      })
      .catch((error) => {
        // Surface request failures (e.g. 401 bad API key) in the status panel.
        lastGPTResponse.innerHTML = `<p style='color:red'>Encountered following error while sending a request to openai:<br><br><code style='color:white'>${error}</code><br><br>If status code is 401, check your API key for OpenAI</p>`;
      });
  });
};
// CommonJS export: the caller passes its connected Twitch client in.
module.exports = handleMessage;