Skip to content

Commit d7ff444

Browse files
authored
Merge pull request #13 from Adri6336/fix_character/model selection bug
Due to the way models are named and selected from the set_model function, functions would use default models instead of the currently used one. Additionally, stay in character was erroneously left using GPT-4 only; this update should fix that.
2 parents 13a8865 + f724d93 commit d7ff444

File tree

1 file changed

+8
-5
lines changed

1 file changed

+8
-5
lines changed

chatbot.py

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -12,12 +12,13 @@
1212
from general_functions import *
1313

1414

15-
def stay_in_character(message: str, key: str) -> tuple:
15+
def stay_in_character(message: str, key: str, model: str) -> tuple:
1616
"""
1717
If the AI says something too robotic, this will have it stay in character.
1818
1919
:param message: This is the message the AI gave you.
2020
:param key: This is your OpenAI key.
21+
:param model: desired GPT model
2122
:return: (bool representing success status, message, tokens)
2223
"""
2324

@@ -32,7 +33,7 @@ def stay_in_character(message: str, key: str) -> tuple:
3233

3334
# Setup GPT
3435
gpt = GPT3(key)
35-
gpt.set_model('gpt-4')
36+
gpt.set_model(model)
3637

3738
# Try to get rephrased version
3839
try:
@@ -143,6 +144,7 @@ class Chatbot():
143144
conversation_memories = ''
144145
total_back_and_forth = [] # This will contain the entire conversation, preserved through recycling
145146
gpt_model = 'text-davinci-003' # This determines the model you're using for completion. Edit with self.set_model()
147+
model_selection = 'davinci' # This represents what went into the set_model function
146148
max_tokens = 4000
147149
tokens = 0 # This represents the current token consumption
148150
full_conversation = ''
@@ -286,7 +288,7 @@ def say_to_chatbot(self, text: str, outloud: bool = True,
286288
# Also manage token count here
287289
if declares_self_ai(reply):
288290
try:
289-
new_response = stay_in_character(reply, self.api_key)
291+
new_response = stay_in_character(reply, self.api_key, self.model_selection)
290292

291293
if new_response[0]: # If the attempt was successful
292294
#self.tokens += new_response[2] # Add tokens to total
@@ -351,7 +353,7 @@ def recycle_tokens(self, chunk_by: int = 2, quiet=True):
351353
ct = 0 # This will count until a specified termination threshold to protect against infinite loops
352354
terminate_value = len(chunks)
353355
errorct = 0
354-
gpt_model = self.gpt_model
356+
gpt_model = self.model_selection
355357

356358
# 1. Collect mini summaries for entire conversation
357359
info('Loading', 'topic')
@@ -449,7 +451,7 @@ def create_memories(self, chunk_by=2, quiet=True, restore=False):
449451
ct = 0 # This will count until a specified termination threshold to protect against infinite loops
450452
terminate_value = len(chunks)
451453
errorct = 0
452-
model_placeholder = self.gpt_model
454+
model_placeholder = self.model_selection
453455

454456
memory_directive = ("Create a new single memory text dict with the following format:\n\n" +
455457
"{humans_job:[], humans_likes:[], humans_dislikes[], humans_personality:[], facts_about_human:[], things_discussed:[], humans_interests:[], things_to_remember:[]}\n\n" +
@@ -914,6 +916,7 @@ def set_model(self, desired_model: str, quiet=True):
914916
# 1. Set model
915917
self.gpt_model = models[desired_model][0]
916918
self.max_tokens = models[desired_model][1]
919+
self.model_selection = desired_model
917920

918921
# 2. Determine if max tokens are passed on new model
919922
if self.tokens >= self.max_tokens:

0 commit comments

Comments
 (0)