From b357f34639095754def7d5f0ecf4adb827bbe1ef Mon Sep 17 00:00:00 2001 From: ferdinant722 Date: Mon, 15 Apr 2024 22:48:43 +0300 Subject: [PATCH 1/5] Chatbot --- client/script.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/script.js b/client/script.js index d027aa76..695f666f 100644 --- a/client/script.js +++ b/client/script.js @@ -86,7 +86,7 @@ const handleSubmit = async (e) => { // messageDiv.innerHTML = "..." loader(messageDiv) - const response = await fetch('https://codex-im0y.onrender.com/', { + const response = await fetch('deployment link here', { method: 'POST', headers: { 'Content-Type': 'application/json', From b49bfbedb612d3f5cdf8f30f7f1eb4e9739f3c8f Mon Sep 17 00:00:00 2001 From: ferdinant722 <75948432+ferdinant722@users.noreply.github.com> Date: Tue, 16 Apr 2024 00:02:38 +0300 Subject: [PATCH 2/5] Update README.md --- README.md | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 3d6775ed..116a3692 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,2 @@ -# Build and Deploy Your Own ChatGPT AI Application That Will Help You Code -![Open AI CodeGPT](https://i.ibb.co/LS4DRhb/image-257.png) - -### [🌟 Become a top 1% Next.js 13 developer in only one course](https://jsmastery.pro/next13) -### [🚀 Land your dream programming job in 6 months](https://jsmastery.pro/masterclass) - -### Launch your development career with project-based coaching - https://www.jsmastery.pro +# Mental Health Chat AI +![Mental Health Open AI CodeGPT](https://i.ibb.co/LS4DRhb/image-257.png) From f528497954e903b313299cd74ba795dc96811089 Mon Sep 17 00:00:00 2001 From: ferdinant722 Date: Tue, 16 Apr 2024 11:26:25 +0300 Subject: [PATCH 3/5] server --- server/script.js | 120 +++++++++++++++++++++++++++++++++++++++++++++++ server/server.js | 48 ------------------- 2 files changed, 120 insertions(+), 48 deletions(-) create mode 100644 server/script.js delete mode 100644 server/server.js diff --git a/server/script.js 
b/server/script.js new file mode 100644 index 00000000..e6086e16 --- /dev/null +++ b/server/script.js @@ -0,0 +1,120 @@ +import bot from './assets/bot.svg' +import user from './assets/user.svg' + +const form = document.querySelector('form') +const chatContainer = document.querySelector('#chat_container') + +let loadInterval + +function loader(element) { + element.textContent = '' + + loadInterval = setInterval(() => { + // Update the text content of the loading indicator + element.textContent += '.'; + + // If the loading indicator has reached three dots, reset it + if (element.textContent === '....') { + element.textContent = ''; + } + }, 300); +} + +function typeText(element, text) { + let index = 0 + + let interval = setInterval(() => { + if (index < text.length) { + element.innerHTML += text.charAt(index) + index++ + } else { + clearInterval(interval) + } + }, 20) +} + +// generate unique ID for each message div of bot +// necessary for typing text effect for that specific reply +// without unique ID, typing text will work on every element +function generateUniqueId() { + const timestamp = Date.now(); + const randomNumber = Math.random(); + const hexadecimalString = randomNumber.toString(16); + + return `id-${timestamp}-${hexadecimalString}`; +} + +function chatStripe(isAi, value, uniqueId) { + return ( + ` +
+
+
+ ${isAi ? 'bot' : 'user'} +
+
${value}
+
+
+ ` + ) +} + +const handleSubmit = async (e) => { + e.preventDefault() + + const data = new FormData(form) + + // user's chatstripe + chatContainer.innerHTML += chatStripe(false, data.get('prompt')) + + // to clear the textarea input + form.reset() + + // bot's chatstripe + const uniqueId = generateUniqueId() + chatContainer.innerHTML += chatStripe(true, " ", uniqueId) + + // to focus scroll to the bottom + chatContainer.scrollTop = chatContainer.scrollHeight; + + // specific message div + const messageDiv = document.getElementById(uniqueId) + + // messageDiv.innerHTML = "..." + loader(messageDiv) + + const response = await fetch('https://project-openai-codex-1-1.onrender.com', { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ + prompt: data.get('prompt') + }) + }) + + clearInterval(loadInterval) + messageDiv.innerHTML = " " + + if (response.ok) { + const data = await response.json(); + const parsedData = data.bot.trim() // trims any trailing spaces/'\n' + + typeText(messageDiv, parsedData) + } else { + const err = await response.text() + + messageDiv.innerHTML = "Something went wrong" + alert(err) + } +} + +form.addEventListener('submit', handleSubmit) +form.addEventListener('keyup', (e) => { + if (e.keyCode === 13) { + handleSubmit(e) + } +}) \ No newline at end of file diff --git a/server/server.js b/server/server.js deleted file mode 100644 index a6c56255..00000000 --- a/server/server.js +++ /dev/null @@ -1,48 +0,0 @@ -import express from 'express' -import * as dotenv from 'dotenv' -import cors from 'cors' -import { Configuration, OpenAIApi } from 'openai' - -dotenv.config() - -const configuration = new Configuration({ - apiKey: process.env.OPENAI_API_KEY, -}); - -const openai = new OpenAIApi(configuration); - -const app = express() -app.use(cors()) -app.use(express.json()) - -app.get('/', async (req, res) => { - res.status(200).send({ - message: 'Hello from CodeX!' 
- }) -}) - -app.post('/', async (req, res) => { - try { - const prompt = req.body.prompt; - - const response = await openai.createCompletion({ - model: "text-davinci-003", - prompt: `${prompt}`, - temperature: 0, // Higher values means the model will take more risks. - max_tokens: 3000, // The maximum number of tokens to generate in the completion. Most models have a context length of 2048 tokens (except for the newest models, which support 4096). - top_p: 1, // alternative to sampling with temperature, called nucleus sampling - frequency_penalty: 0.5, // Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - presence_penalty: 0, // Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - }); - - res.status(200).send({ - bot: response.data.choices[0].text - }); - - } catch (error) { - console.error(error) - res.status(500).send(error || 'Something went wrong'); - } -}) - -app.listen(5000, () => console.log('AI server started on http://localhost:5000')) \ No newline at end of file From 91e8184a2ac549aa264f3a908f0794afa50ba526 Mon Sep 17 00:00:00 2001 From: ferdinant722 Date: Tue, 16 Apr 2024 11:34:21 +0300 Subject: [PATCH 4/5] changed naming --- server/script.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/script.js b/server/script.js index e6086e16..da71c6ba 100644 --- a/server/script.js +++ b/server/script.js @@ -86,7 +86,7 @@ const handleSubmit = async (e) => { // messageDiv.innerHTML = "..." 
loader(messageDiv) - const response = await fetch('https://project-openai-codex-1-1.onrender.com', { + const response = await fetch('https://project-openai-codex-1.onrender.com', { method: 'POST', headers: { 'Content-Type': 'application/json', From c75ef38af7b675afe1d241b6ae9c6cacad7d6109 Mon Sep 17 00:00:00 2001 From: ferdinant722 Date: Tue, 16 Apr 2024 11:39:29 +0300 Subject: [PATCH 5/5] amend --- server/script.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/script.js b/server/script.js index da71c6ba..2d60bf42 100644 --- a/server/script.js +++ b/server/script.js @@ -86,7 +86,7 @@ const handleSubmit = async (e) => { // messageDiv.innerHTML = "..." loader(messageDiv) - const response = await fetch('https://project-openai-codex-1.onrender.com', { + const response = await fetch('https://project_openai_codex-1.onrender.com', { method: 'POST', headers: { 'Content-Type': 'application/json',