Skip to content

Commit 53b5566

Browse files
Add project files.
1 parent 7424ff2 commit 53b5566

File tree

4 files changed

+354
-0
lines changed

4 files changed

+354
-0
lines changed

Diff for: Templates/Index.HTML

+179
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,179 @@
1+
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>Chatbot</title>
    <style>
        /* General styles */
        body {
            font-family: Arial, sans-serif;
            background-color: #f4f4f9;
            margin: 0;
            padding: 0;
            display: flex;
            justify-content: center;
            align-items: center;
            height: 100vh;
        }

        /* Container for the entire chat application */
        #chat-container {
            width: 400px;
            background-color: #fff;
            box-shadow: 0 0 10px rgba(0, 0, 0, 0.1);
            border-radius: 8px;
            overflow: hidden;
        }

        /* Chat window where messages are displayed */
        #chat-window {
            height: 400px;
            border-bottom: 1px solid #ccc;
            overflow-y: scroll;
            padding: 20px;
        }

        /* Input field and send button container */
        #input-container {
            display: flex;
            padding: 10px;
            background-color: #f4f4f9;
        }

        /* Input field for user messages */
        #user-input {
            flex: 1;
            padding: 10px;
            border: 1px solid #ccc;
            border-radius: 4px;
            margin-right: 10px;
            font-size: 16px;
        }

        /* Send button */
        #send-button {
            padding: 10px 20px;
            background-color: #007bff;
            border: none;
            color: white;
            border-radius: 4px;
            cursor: pointer;
            font-size: 16px;
        }

        #send-button:hover {
            background-color: #0056b3;
        }

        /* Styling for each chat message */
        .chat-message {
            margin-bottom: 10px;
            clear: both;
        }

        /* Styling for user messages */
        .chat-message.user {
            text-align: right;
        }

        .chat-message.user .message {
            background-color: #007bff;
            color: white;
            border-radius: 15px 15px 0 15px;
        }

        /* Styling for bot messages */
        .chat-message.bot {
            text-align: left;
        }

        .chat-message.bot .message {
            background-color: #f1f1f1;
            color: black;
            border-radius: 15px 15px 15px 0;
        }

        /* Common styling for message text */
        .message {
            display: inline-block;
            padding: 10px 15px;
            max-width: 80%;
            word-wrap: break-word;
        }

        /* Styling for response time */
        .chat-message em {
            display: block;
            font-size: 0.8em;
            color: #888; /* Lighter color for less emphasis */
        }
    </style>
</head>
<body>
    <!-- Container for the chat interface -->
    <div id="chat-container">
        <!-- Window where chat messages appear -->
        <div id="chat-window"></div>
        <!-- Input field and send button container -->
        <div id="input-container">
            <input type="text" id="user-input" placeholder="Type a message...">
            <button id="send-button">Send</button>
        </div>
    </div>

    <script>
        // Append a message bubble (and optional response-time note) to the chat window.
        // sender is 'user' or 'bot' and selects the CSS styling.
        function appendMessage(sender, text, responseTime = null) {
            const chatWindow = document.getElementById('chat-window');
            const messageElement = document.createElement('div');
            messageElement.classList.add('chat-message', sender);

            const messageText = document.createElement('div');
            messageText.classList.add('message');
            messageText.textContent = text;
            messageElement.appendChild(messageText);

            if (responseTime) {
                const timeElement = document.createElement('em');
                timeElement.textContent = `Query took: ${responseTime.toFixed(2)} seconds`;
                messageElement.appendChild(timeElement);
            }

            chatWindow.appendChild(messageElement);
            chatWindow.scrollTop = chatWindow.scrollHeight; // Auto-scroll to the bottom
        }

        document.getElementById('send-button').addEventListener('click', async () => {
            const userInput = document.getElementById('user-input').value;
            if (!userInput.trim()) return; // Do not send empty messages
            document.getElementById('user-input').value = '';

            appendMessage('user', userInput); // Append user's message

            try {
                // Send user input to the backend
                const response = await fetch('/get', {
                    method: 'POST',
                    headers: {
                        'Content-Type': 'application/json'
                    },
                    body: JSON.stringify({ msg: userInput }) // Convert input to JSON
                });

                // Get response data from the backend
                const data = await response.json();
                if (data.error) {
                    appendMessage('bot', data.error); // Append bot's error message
                } else {
                    appendMessage('bot', data.response, data.response_time); // Append bot's response and response time
                }
            } catch (err) {
                // Network failure or non-JSON reply: surface the problem in the
                // chat instead of dying with an unhandled promise rejection.
                appendMessage('bot', 'Sorry, something went wrong. Please try again.');
            }
        });

        // Allow pressing Enter key to send message
        document.getElementById('user-input').addEventListener('keypress', (e) => {
            if (e.key === 'Enter') {
                document.getElementById('send-button').click();
            }
        });
    </script>
</body>
</html>

Diff for: pllm.py

+117
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,117 @@
1+
import os
import time
from flask import Flask, render_template, request, jsonify
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM

# NOTE(review): `os` is imported but never used in this file — confirm before removing.

# Initialize Flask app
app = Flask(__name__)

# Load the model and tokenizer once at import time (this download/load is
# slow and memory-heavy; it happens before the server starts accepting requests).
model_name = "EleutherAI/gpt-neo-2.7B"
tokenizer = AutoTokenizer.from_pretrained(model_name)

# GPT-Neo has no pad token by default; reuse the EOS token so padding works.
if tokenizer.pad_token is None:
    tokenizer.add_special_tokens({'pad_token': tokenizer.eos_token})

# Print to verify the pad_token has been set
print("Pad token:", tokenizer.pad_token)

model = AutoModelForCausalLM.from_pretrained(model_name)
model.resize_token_embeddings(len(tokenizer))  # Resize embeddings to include the new pad_token

# Set device to -1 to ensure it runs on CPU
# NOTE(review): `pipe` is never used below — generation goes through
# model.generate() directly in get_bot_response(); confirm before removing.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, device=-1)
print("Model EleutherAI/gpt-neo-2.7B loaded. Tokenizer and Pipeline created.")
26+
27+
def format_prompt(user_text):
    """Wrap *user_text* in the instruction template sent to the model.

    Returns the prompt string with the question embedded after ``Q:`` and a
    trailing ``A:`` cue for the model to complete.
    """
    instruction = (
        "Please provide a professional and factual answer without adding "
        "personal opinions to the following question:"
    )
    return f"{instruction}\nQ: {user_text}\nA:"
32+
33+
def post_process_response(response):
    """Trim raw model output down to a single concise answer.

    The generated text echoes the prompt, so everything up to the first
    ``A:`` marker is discarded; any follow-up ``Q:`` the model invented is
    stripped; only the first sentence is kept; the result is capped at
    200 characters.

    Args:
        response: Full decoded text returned by the model.

    Returns:
        A string of at most 200 characters.
    """
    # Drop the echoed prompt: keep only the text after the first "A:".
    if "A:" in response:
        response = response.split("A:")[1].strip()

    response = response.split("Q:")[0].strip()  # Remove any additional questions
    response = response.split(". ")[0] + '.'    # Take only the first sentence
    return response[:200]                       # Truncate to 200 characters
45+
46+
47+
48+
49+
@app.route('/')
def home():
    """Serve the chat UI.

    Renders ``templates/index.html``; if rendering fails, the exception text
    is returned as the response body instead of a 500 page.
    """
    try:
        page = render_template('index.html')
    except Exception as exc:
        return str(exc)
    return page
58+
59+
@app.route('/get', methods=['POST'])
def get_bot_response():
    """Generate the bot's reply for a posted user message.

    Expects a JSON body of the form ``{"msg": "<user text>"}`` and returns
    JSON with either ``{"response": <str>, "response_time": <float seconds>}``
    or ``{"error": <str>}``.
    """
    # silent=True keeps a malformed/missing JSON body from raising; we answer
    # with a JSON error instead so the frontend can display it.
    payload = request.get_json(silent=True) or {}
    user_text = payload.get('msg')
    if not user_text or not user_text.strip():
        return jsonify({"error": "Empty message."})

    formatted_prompt = format_prompt(user_text)
    print("User input:", user_text)
    print("Formatted prompt:", formatted_prompt)

    start_time = time.time()  # Start timing the response generation

    try:
        # Tokenize the input with truncation and padding
        inputs = tokenizer(formatted_prompt, return_tensors="pt", truncation=True, padding=True, max_length=1024)
        prompt_length = inputs.input_ids.shape[1]

        # Log the length of the prompt
        print(f"Prompt length: {prompt_length}")

        # Cap the reply so prompt + reply stays within the 2048-token context window
        max_response_length = min(200, 2048 - prompt_length)
        if max_response_length <= 0:
            return jsonify({"error": "Input exceeds the maximum token limit."})

        # Log the max response length
        print(f"Max response length: {max_response_length}")

        # Generate the response. do_sample=True is required for temperature /
        # top_p to take effect; without it generation is greedy and those
        # arguments are silently ignored by transformers.
        response = model.generate(
            inputs['input_ids'],
            attention_mask=inputs['attention_mask'],
            max_length=prompt_length + max_response_length,  # Ensure the total length doesn't exceed 2048 tokens
            min_length=prompt_length + 50,  # Ensure the response is meaningful
            pad_token_id=tokenizer.pad_token_id,  # Set pad token id
            no_repeat_ngram_size=2,
            num_return_sequences=1,
            do_sample=True,
            temperature=0.50,  # Adjusted for more coherent output
            top_p=0.80  # Adjust top_p for more relevant results
        )

        # Log the generated response
        print("Response generated")

        generated_text = tokenizer.decode(response[0], skip_special_tokens=True)
        processed_response = post_process_response(generated_text)

        response_time = time.time() - start_time  # End timing the response generation
        print(f"Time taken for response: {response_time} seconds")

        return jsonify({"response": processed_response, "response_time": response_time})
    except Exception as e:
        print("Error during model generation:", str(e))
        return jsonify({"error": str(e)})
114+
115+
# Run the Flask development server (debug disabled) when executed directly.
if __name__ == "__main__":
    app.run(debug=False)
117+

Diff for: pllm.pyproj

+35
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,35 @@
1+
<Project DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003" ToolsVersion="4.0">
2+
<PropertyGroup>
3+
<Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
4+
<SchemaVersion>2.0</SchemaVersion>
5+
<ProjectGuid>fcf85714-8a47-4b38-a5b1-7f76a00ca3f7</ProjectGuid>
6+
<ProjectHome>.</ProjectHome>
7+
<StartupFile>pllm.py</StartupFile>
8+
<SearchPath>
9+
</SearchPath>
10+
<WorkingDirectory>.</WorkingDirectory>
11+
<OutputPath>.</OutputPath>
12+
<Name>pllm</Name>
13+
<RootNamespace>pllm</RootNamespace>
14+
</PropertyGroup>
15+
<PropertyGroup Condition=" '$(Configuration)' == 'Debug' ">
16+
<DebugSymbols>true</DebugSymbols>
17+
<EnableUnmanagedDebugging>false</EnableUnmanagedDebugging>
18+
</PropertyGroup>
19+
<PropertyGroup Condition=" '$(Configuration)' == 'Release' ">
20+
<DebugSymbols>true</DebugSymbols>
21+
<EnableUnmanagedDebugging>false</EnableUnmanagedDebugging>
22+
</PropertyGroup>
23+
<ItemGroup>
24+
<Compile Include="pllm.py" />
25+
</ItemGroup>
26+
<Import Project="$(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion)\Python Tools\Microsoft.PythonTools.targets" />
27+
<!-- Uncomment the CoreCompile target to enable the Build command in
28+
Visual Studio and specify your pre- and post-build commands in
29+
the BeforeBuild and AfterBuild targets below. -->
30+
<!--<Target Name="CoreCompile" />-->
31+
<Target Name="BeforeBuild">
32+
</Target>
33+
<Target Name="AfterBuild">
34+
</Target>
35+
</Project>

Diff for: pllm.sln

+23
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,23 @@
1+
2+
Microsoft Visual Studio Solution File, Format Version 12.00
3+
# Visual Studio Version 17
4+
VisualStudioVersion = 17.6.33829.357
5+
MinimumVisualStudioVersion = 10.0.40219.1
6+
Project("{888888A0-9F3D-457C-B088-3A5042F75D52}") = "pllm", "pllm.pyproj", "{FCF85714-8A47-4B38-A5B1-7F76A00CA3F7}"
7+
EndProject
8+
Global
9+
GlobalSection(SolutionConfigurationPlatforms) = preSolution
10+
Debug|Any CPU = Debug|Any CPU
11+
Release|Any CPU = Release|Any CPU
12+
EndGlobalSection
13+
GlobalSection(ProjectConfigurationPlatforms) = postSolution
14+
{FCF85714-8A47-4B38-A5B1-7F76A00CA3F7}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
15+
{FCF85714-8A47-4B38-A5B1-7F76A00CA3F7}.Release|Any CPU.ActiveCfg = Release|Any CPU
16+
EndGlobalSection
17+
GlobalSection(SolutionProperties) = preSolution
18+
HideSolutionNode = FALSE
19+
EndGlobalSection
20+
GlobalSection(ExtensibilityGlobals) = postSolution
21+
SolutionGuid = {CA255654-3A25-472C-81C8-592C2C724D29}
22+
EndGlobalSection
23+
EndGlobal

0 commit comments

Comments
 (0)