12
12
from general_functions import *
13
13
14
14
15
- def stay_in_character (message : str , key : str ) -> tuple :
15
+ def stay_in_character (message : str , key : str , model : str ) -> tuple :
16
16
"""
17
17
If the AI says something too robotic, this will have it stay in character.
18
18
19
19
:param message: This is the message the AI gave you.
20
20
:param key: This is your OpenAI key.
21
+ :param model: desired GPT model
21
22
:return: (bool representing success status, message, tokens)
22
23
"""
23
24
@@ -32,7 +33,7 @@ def stay_in_character(message: str, key: str) -> tuple:
32
33
33
34
# Setup GPT
34
35
gpt = GPT3 (key )
35
- gpt .set_model ('gpt-4' )
36
+ gpt .set_model (model )
36
37
37
38
# Try to get rephrased version
38
39
try :
@@ -143,6 +144,7 @@ class Chatbot():
143
144
conversation_memories = ''
144
145
total_back_and_forth = [] # This will contain the entire conversation, preserved through recycling
145
146
gpt_model = 'text-davinci-003' # This determines the model you're using for completion. Edit with self.set_model()
147
+ model_selection = 'davinci' # This represents what went into the set_model function
146
148
max_tokens = 4000
147
149
tokens = 0 # This represents the current token consumption
148
150
full_conversation = ''
@@ -286,7 +288,7 @@ def say_to_chatbot(self, text: str, outloud: bool = True,
286
288
# Also manage token count here
287
289
if declares_self_ai (reply ):
288
290
try :
289
- new_response = stay_in_character (reply , self .api_key )
291
+ new_response = stay_in_character (reply , self .api_key , self . model_selection )
290
292
291
293
if new_response [0 ]: # If the attempt was successful
292
294
#self.tokens += new_response[2] # Add tokens to total
@@ -351,7 +353,7 @@ def recycle_tokens(self, chunk_by: int = 2, quiet=True):
351
353
ct = 0 # This will count until a specified termination threshold to protect against infinite loops
352
354
terminate_value = len (chunks )
353
355
errorct = 0
354
- gpt_model = self .gpt_model
356
+ gpt_model = self .model_selection
355
357
356
358
# 1. Collect mini summaries for entire conversation
357
359
info ('Loading' , 'topic' )
@@ -449,7 +451,7 @@ def create_memories(self, chunk_by=2, quiet=True, restore=False):
449
451
ct = 0 # This will count until a specified termination threshold to protect against infinite loops
450
452
terminate_value = len (chunks )
451
453
errorct = 0
452
- model_placeholder = self .gpt_model
454
+ model_placeholder = self .model_selection
453
455
454
456
memory_directive = ("Create a new single memory text dict with the following format:\n \n " +
455
457
"{humans_job:[], humans_likes:[], humans_dislikes[], humans_personality:[], facts_about_human:[], things_discussed:[], humans_interests:[], things_to_remember:[]}\n \n " +
@@ -914,6 +916,7 @@ def set_model(self, desired_model: str, quiet=True):
914
916
# 1. Set model
915
917
self .gpt_model = models [desired_model ][0 ]
916
918
self .max_tokens = models [desired_model ][1 ]
919
+ self .model_selection = desired_model
917
920
918
921
# 2. Determine if max tokens are passed on new model
919
922
if self .tokens >= self .max_tokens :
0 commit comments